= 5\n h = paddle.to_tensor(h)\n m = paddle.to_tensor(m.weight)\n m *= h\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.set_value(net[0].weight.numpy() + 1)\nval = net[0].weight.numpy()\nval[0, 0] = 42\nnet[0].weight.set_value(val)\nnet[0].weight[0]\nlayer = CenteredLayer()\nlayer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport numpy as np\nimport paddle\nfrom paddle import nn\ntrue_w, features, poly_features, labels = [paddle.to_tensor(x, dtype=\n paddle.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef train(train_features, test_features, train_labels, test_labels,\n num_epochs=400):\n loss = nn.MSELoss()\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False)\n trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2],\n labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :],\n labels[:n_train], labels[n_train:], num_epochs=1500)":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nnet = nn.Sequential(\n nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)":6,"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nx = paddle.arange(4)\npaddle.save(x, 'x-file')\nx2 = paddle.load('x-file')\ny = paddle.zeros([4])\npaddle.save([x,y], 'x-file')\nx2, y2 = paddle.load('x-file')\nmydict = {'x': x, 'y': y}\npaddle.save(mydict, 'mydict')\nmydict2 = paddle.load('mydict')\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = paddle.randn(shape=[2, 20])\nY = net(X)\npaddle.save(net.state_dict(), 'mlp.pdparams')\nclone = MLP()\nclone.set_state_dict(paddle.load('mlp.pdparams'))\nclone.eval()":6,"import 
warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport os\nimport paddle\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = (array != vocab['<pad>']).astype(paddle.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(paddle.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(paddle.int32))\n print('Valid length of Y:', Y_valid_len)\n break":6,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport sys\nimport paddle\nfrom paddle.vision import transforms\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\nmnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if paddle.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ndef get_dataloader_workers():\n return 4\ntrain_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\n mnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\n return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()),\n paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers()))":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H,*_ = state\n outputs = []\n for X in 
inputs:\n Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H,*_)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)":6,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport time\nimport numpy as np\nimport paddle\nn = 10000\na = paddle.ones([n])\nb = paddle.ones([n])\nc = paddle.zeros([n])\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',\n ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])":6,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Layer):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = paddle.concat(x=[X, Y], axis=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = paddle.randn([4, 3, 8, 8])\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2D(kernel_size=2, stride=2))\nb1 = nn.Sequential(\n nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2D(num_channels), nn.ReLU(),\n nn.AdaptiveMaxPool2D((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield paddle.to_tensor(X), paddle.to_tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, 
num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = paddle.to_tensor(corpus[offset: offset + num_tokens])\n Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y":6,"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = paddle.rand([2, 20])\nnet(X)\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nclass MySequential(nn.Layer):\n def __init__(self, *layers):\n super(MySequential, self).__init__()\n if len(layers) > 0 and isinstance(layers[0], tuple):\n for name, layer in layers:\n self.add_sublayer(name, layer)\n else:\n for idx, layer in enumerate(layers):\n self.add_sublayer(str(idx), layer)\n def forward(self, X):\n for layer in self._sub_layers.values():\n X = layer(X)\n return X\nnet = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nnet(X)\nclass FixedHiddenMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.rand_weight = paddle.rand([20, 20])\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nclass NestMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),\n nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)":2,"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = paddle.rand([2, 4])\nnet(X)\nnet.state_dict()['2.bias']\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_sublayer(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Normal(mean=0.0, std=0.01)\n paddle.zeros(m.bias)\nnet.apply(init_normal)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef init_constant(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(value = 1)\n paddle.zeros(m.bias)\nnet.apply(init_constant)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef xavier(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.XavierUniform(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(42)\nnet[0].apply(xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n print(\"Init\", *[(name, param.shape) for name, param in m.named_parameters()][0])\n paddle.nn.initializer.XavierUniform(m.weight, -10, 10)\n h = paddle.abs(m.weight) >= 5\n h = paddle.to_tensor(h)\n m = paddle.to_tensor(m.weight)\n m 
*= h\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.set_value(net[0].weight.numpy() + 1)\nval = net[0].weight.numpy()\nval[0, 0] = 42\nnet[0].weight.set_value(val)\nnet[0].weight[0]\nlayer = CenteredLayer()\nlayer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))\nnet = nn.Sequential(nn.Linear(8, 128), CenteredLayer())":4,"import collections\nimport re\nfrom d2l import paddle as d2l":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Linear(256, 10))\nfor layer in net:\n if type(layer) == nn.Linear:\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.01))\n layer.weight_attr = weight_attr\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)":6,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = paddle.normal(0, 1, shape=(num_inputs, 1))\n w.stop_gradient = False\n b = paddle.zeros(shape=[1])\n b.stop_gradient = False\n return [w, b]\ndef l2_penalty(w):\n return paddle.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr))\n loss = nn.MSELoss()\n num_epochs, lr = 100, 0.003\n trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n trainer.step()\n trainer.clear_grad()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))":6,"x = paddle.arange(12)\nx.numel()\nX = paddle.reshape(x, (3, 4))\npaddle.zeros((2, 3, 4))\npaddle.ones((2, 3, 4))\npaddle.randn((3, 4),'float32')\npaddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = paddle.to_tensor([1.0, 2, 4, 8])\ny = paddle.to_tensor([2, 2, 2, 2])\nx + y, x - 
y, x * y, x / y, x**y\npaddle.exp(x)\nX = paddle.arange(12, dtype='float32').reshape((3, 4))\nY = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\npaddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)\na = paddle.reshape(paddle.arange(3), (3, 1))\nb = paddle.reshape(paddle.arange(2), (1, 2))\nZ = paddle.zeros_like(Y)\nZ = X + Y\nA = X.numpy()\nB = paddle.to_tensor(A)\ntype(A), type(B)\na = paddle.to_tensor([3.5])\na, a.item(), float(a), int(a)":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef corr2d(X, K):\n h, w = K.shape\n Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Layer):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = paddle.ParamAttr(paddle.rand(kernel_size))\n self.bias = paddle.ParamAttr(paddle.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = paddle.ones((6, 8))\nX[:, 2:6] = 0\nK = paddle.to_tensor([[1.0, -1.0]])\nY = corr2d(X, K)\nconv2d = nn.Conv2D(1, 1, kernel_size=(1, 2))\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.clear_gradients()\n l.sum().backward()\n with paddle.no_grad():\n conv2d.weight[:] -= lr * conv2d.weight.grad\nconv2d.weight.reshape((1, 2))":2,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nT = 1000\ntime = paddle.arange(1, T + 1, dtype=paddle.float32)\nx = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = paddle.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.XavierUniform(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters())\n for epoch in range(epochs):\n for i,(X, y) in enumerate (train_iter()):\n trainer.clear_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nmultistep_preds = paddle.zeros([T])\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = paddle.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape([-1])\nsteps = (1, 4, 16, 
64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))":2,"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.to_tensor([3.0])\ny = paddle.to_tensor([2.0])\nx + y, x * y, x / y, x**y\nx = paddle.arange(4)\nA = paddle.reshape(paddle.arange(20), (5, 4))\npaddle.transpose(A, perm=[1, 0])\nB = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == paddle.transpose(B, perm=[1, 0])\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\nA = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))\nB = A.clone()\nA, A + B\na = 2\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\na + X, (a * X).shape\nx = paddle.arange(4, dtype=paddle.float32)\nprint(x, x.sum())\nA.shape, A.sum()\nA.mean(), A.sum() / A.numel()\nA.mean(axis=0), A.sum(axis=0) / A.shape[0]\nsum_A = paddle.sum(A, axis=1, keepdim=True)\ny = paddle.ones(shape=[4], dtype='float32')\nx, y, paddle.dot(x, y)\npaddle.sum(x * y)\nA.shape, x.shape, paddle.mv(A, x)\nB = paddle.ones(shape=[4, 3], dtype='float32')\npaddle.mm(A, B)\nu = paddle.to_tensor([3.0, -4.0])\npaddle.norm(u)\npaddle.abs(u).sum()\npaddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Layer):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = paddle.concat(x=[X, Y], axis=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = paddle.randn([4, 3, 8, 8])\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2D(kernel_size=2, stride=2))\nblk = transition_block(23, 10)\nblk(Y).shape\nb1 = nn.Sequential(\n nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nblks = []\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n blks.append(DenseBlock(num_convs, num_channels, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n blks.append(transition_block(num_channels, num_channels // 2))\n num_channels = num_channels // 2\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2D(num_channels), nn.ReLU(),\n nn.AdaptiveMaxPool2D((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef corr2d(X, K):\n h, w = K.shape\n Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 
5.0], [6.0, 7.0, 8.0]])\nK = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Layer):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = paddle.ParamAttr(paddle.rand(kernel_size))\n self.bias = paddle.ParamAttr(paddle.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = paddle.ones((6, 8))\nX[:, 2:6] = 0\nK = paddle.to_tensor([[1.0, -1.0]])\nY = corr2d(X, K)\ncorr2d(X.t(), K)\nconv2d = nn.Conv2D(1, 1, kernel_size=(1, 2))\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.clear_gradients()\n l.sum().backward()\n with paddle.no_grad():\n conv2d.weight[:] -= lr * conv2d.weight.grad\nconv2d.weight.reshape((1, 2))":4,"%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import paddle as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1":6,"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nclass CenteredLayer(nn.Layer):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nnet = nn.Sequential(nn.Linear(8, 128), CenteredLayer())\nY = net(paddle.rand([4, 8]))\nY.mean()\nclass MyLinear(nn.Layer):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32')\n self.bias = paddle.create_parameter(shape=(units,), dtype='float32')\n def forward(self, X):\n linear = paddle.matmul(X, self.weight) + self.bias\n return F.relu(linear)\nlinear = MyLinear(5, 3)\nlinear(paddle.randn([2, 5]))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(paddle.rand([2, 64]))":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return paddle.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = paddle.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = paddle.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = paddle.normal(0, 1, (3, 3, 3))\nK = paddle.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(paddle.abs(Y1 - Y2).sum()) < 1e-6":6,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ndef synthetic_data(w, b, num_examples):\n X = paddle.normal(0, 1, (num_examples, len(w)))\n y = paddle.matmul(X, w) + b\n y += paddle.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, 
num_examples, batch_size):\n batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n break\nw = paddle.normal(0, 0.01, shape=(2,1))\nb = paddle.zeros(shape=[1])\nw.stop_gradient = False\nb.stop_gradient = False\ndef linreg(X, w, b):\n return paddle.matmul(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2\ndef sgd(params, lr, batch_size):\n with paddle.no_grad():\n for i, param in enumerate(params):\n param -= lr * params[i].grad / batch_size\n params[i].set_value(param)\n params[i].clear_gradient()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with paddle.no_grad():\n train_l = loss(net(features, w, b), labels)":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2D(kernel_size=2, stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(*conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 10))\nconv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))\nnet = vgg(conv_arch)\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True):\n if not is_training:\n X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = paddle.mean(X)\n var = paddle.mean(((X - mean) ** 2))\n else:\n mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)\n var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / (var + eps) ** 0.5\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Layer):\n def __init__(self, num_features, num_dims=4):\n super(BatchNorm, self).__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32')))\n self.beta = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32')))\n self.moving_mean = paddle.zeros(shape=shape, dtype='float32')\n self.moving_var = paddle.zeros(shape=shape, dtype='float32')\n def forward(self, X):\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9, 
is_training=self.training)\n return Y\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\nparam = net.parameters()\nprint('gamma:', param[2].numpy().reshape(-1))\nprint('beta:', param[3].numpy().reshape(-1))\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(),\n nn.Linear(84, 10))":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = paddle.randn([num_inputs, num_hiddens]) * 0.01\nW1.stop_gradient = False\nb1 = paddle.zeros([num_hiddens])\nb1.stop_gradient = False\nW2 = paddle.randn([num_hiddens, num_outputs]) * 0.01\nW2.stop_gradient = False\nb2 = paddle.zeros([num_outputs])\nb2.stop_gradient = False\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = paddle.zeros_like(X)\n return paddle.maximum(X, a)\nnum_epochs, lr = 10, 0.1\nupdater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)":2,"import warnings\nwarnings.filterwarnings(action='ignore')\nimport random\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return paddle.zeros_like(X)\n if dropout == 0:\n return X\n mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')\n return mask * X / (1.0 - dropout)\nX= paddle.arange(16, dtype = paddle.float32).reshape((2, 8))\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Layer):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,\n is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = 
paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10, weight_attr=weight_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)":2,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(-8.0, 8.0, 0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.relu(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = paddle.nn.functional.sigmoid(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = paddle.tanh(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))":6,"counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1))\ncum_counts = counts.cumsum(axis=0)\ncum_counts = cum_counts.squeeze(axis=1)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdim=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i],\n label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend()\nimport warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nhelp(paddle.ones)\npaddle.ones([4], dtype='float32')":6,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom IPython import display\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))\nb = paddle.zeros(shape=(num_outputs,))\nW.stop_gradient=False\nb.stop_gradient=False\nX = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = paddle.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = paddle.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = paddle.to_tensor([0, 2])\ny_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n if len(y_hat.shape) < 
len(y.shape):\n cmp = y_hat.astype(y.dtype) == y.squeeze()\n else:\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, paddle.nn.Layer):\n net.eval()\n metric = Accumulator(2)\n with paddle.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, paddle.nn.Layer):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nfrom paddle.nn import functional as F\nclass Residual(nn.Layer):\n def __init__(self, input_channels, num_channels, use_1x1conv=False,\n strides=1):\n super(Residual, self).__init__()\n self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2D(num_channels)\n self.bn2 = nn.BatchNorm2D(num_channels)\n self.relu = nn.ReLU()\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3, 3)\nX = paddle.rand([4, 3, 6, 6])\nY = blk(X)\nY.shape\nblk = Residual(3, 6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\ndef resnet_block(input_channels, num_channels, num_residuals, first_block=False):\n blk = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))\n else:\n blk.append(Residual(num_channels, num_channels))\n return blk\nb2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))\nb3 = nn.Sequential(*resnet_block(64, 128, 2))\nb4 = nn.Sequential(*resnet_block(128, 256, 2))\nb5 = nn.Sequential(*resnet_block(256, 512, 2))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport paddle\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = paddle.io.TensorDataset(data_arrays)\n return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01))\nbias_attr = paddle.ParamAttr(initializer=None)\nnet = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr))\nloss = nn.MSELoss()\ntrainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters())\nw = net[0].weight\nb = net[0].bias":4,"import warnings\nwarnings.filterwarnings(action='ignore')\nimport random\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= 
dropout <= 1\n if dropout == 1:\n return paddle.zeros_like(X)\n if dropout == 0:\n return X\n mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')\n return mask * X / (1.0 - dropout)\nX= paddle.arange(16, dtype = paddle.float32).reshape((2, 8))\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Layer):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,\n is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10, weight_attr=weight_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn, optimizer\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = paddle.rand((1, 1, 28, 28), 'float32')\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2D:\n nn.initializer.XavierUniform(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.clear_grad()\n X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with paddle.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 
1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))":6,"%matplotlib inline\nimport warnings\nimport numpy as np\nimport pandas as pd\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\nn_train = train_data.shape[0]\ntrain_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32)\ntest_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32)\ntrain_labels = paddle.to_tensor(\n train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32)\ndef log_rmse(net, features, labels):\n clipped_preds = paddle.clip(net(features), 1, float('inf'))\n rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n optimizer.clear_grad()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = paddle.concat([X_train, X_part], 0)\n y_train = paddle.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid":2,"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = paddle.rand([2, 20])\nnet(X)\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nclass MySequential(nn.Layer):\n def __init__(self, *layers):\n super(MySequential, self).__init__()\n if len(layers) > 0 and isinstance(layers[0], tuple):\n for name, layer in layers:\n self.add_sublayer(name, layer)\n else:\n for idx, layer in enumerate(layers):\n self.add_sublayer(str(idx), layer)\n def forward(self, X):\n for layer in self._sub_layers.values():\n X = layer(X)\n return X\nclass FixedHiddenMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.rand_weight = paddle.rand([20, 20])\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nclass NestMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),\n nn.Linear(64, 32), 
nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)":2,"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\n\ndef comp_conv2d(conv2d, X):\n X = paddle.reshape(X, [1, 1] + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1)\nX = paddle.rand((8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape":6,"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.arange(4, dtype='float32')\nx = paddle.to_tensor(x, stop_gradient=False)\ny = 2 * paddle.dot(x, x)\nx.clear_gradient()\ny = paddle.sum(x)\ny.backward()\nx.grad\nx.clear_gradient()\ny = x * x\npaddle.sum(y).backward()\nx.grad\nx.clear_gradient()\ny = x * x\nu = y.detach()\nz = u * x\npaddle.sum(z).backward()\nx.grad == u\nx.clear_gradient()\npaddle.sum(y).backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while paddle.norm(b) < 1000:\n b = b * 2\n if paddle.sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)\nd = f(a)\nd.backward()":4,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ndef synthetic_data(w, b, num_examples):\n X = paddle.normal(0, 1, (num_examples, len(w)))\n y = paddle.matmul(X, w) + b\n y += paddle.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n break\nw = paddle.normal(0, 0.01, shape=(2,1))\nb = paddle.zeros(shape=[1])\nw.stop_gradient = False\nb.stop_gradient = False\ndef linreg(X, w, b):\n return paddle.matmul(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2\ndef sgd(params, lr, batch_size):\n with paddle.no_grad():\n for i, param in enumerate(params):\n param -= lr * params[i].grad / batch_size\n params[i].set_value(param)\n params[i].clear_gradient()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with paddle.no_grad():\n train_l = loss(net(features, w, b), labels)":2,"trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport warnings\nfrom 
d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.sigmoid(x)\ny.backward(paddle.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],\n legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = paddle.normal(0, 1, shape=(4,4))\nfor i in range(100):\n M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4)))":6,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport numpy as np\nimport paddle\nfair_probs = [1.0 / 6] * 6\npaddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample()\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000":6,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.Normal(m.weight, std=0.01)\nnet.apply(init_weights);\ntrainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as Function\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), paddle.zeros([batch_size, num_hiddens]))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * paddle.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)":6,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as 
nn\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2D(kernel_size=2, stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(*conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 10))\nconv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))\nnet = vgg(conv_arch)\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = vgg(small_conv_arch)":2,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nT = 1000\ntime = paddle.arange(1, T + 1, dtype=paddle.float32)\nx = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = paddle.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.XavierUniform(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters())\n for epoch in range(epochs):\n for i,(X, y) in enumerate (train_iter()):\n trainer.clear_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = paddle.zeros([T])\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = paddle.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape([-1])\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nfrom paddle.nn import functional as F\nclass Residual(nn.Layer):\n def 
__init__(self, input_channels, num_channels, use_1x1conv=False,\n strides=1):\n super(Residual, self).__init__()\n self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2D(num_channels)\n self.bn2 = nn.BatchNorm2D(num_channels)\n self.relu = nn.ReLU()\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3, 3)\nX = paddle.rand([4, 3, 6, 6])\nY = blk(X)\nY.shape\nblk = Residual(3, 6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\ndef resnet_block(input_channels, num_channels, num_residuals, first_block=False):\n blk = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))\n else:\n blk.append(Residual(num_channels, num_channels))\n return blk\nb2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))\nb3 = nn.Sequential(*resnet_block(64, 128, 2))\nb4 = nn.Sequential(*resnet_block(128, 256, 2))\nb5 = nn.Sequential(*resnet_block(256, 512, 2))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)":4,"x = paddle.arange(12)\nx.numel()\nX = paddle.reshape(x, (3, 4))\npaddle.zeros((2, 3, 4))\npaddle.ones((2, 3, 4))\npaddle.randn((3, 4),'float32')\npaddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = paddle.to_tensor([1.0, 2, 4, 8])\ny = paddle.to_tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x**y\npaddle.exp(x)\nX = paddle.arange(12, dtype='float32').reshape((3, 4))\nY = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\npaddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)\nX.sum()\na = paddle.reshape(paddle.arange(3), (3, 1))\nb = paddle.reshape(paddle.arange(2), (1, 2))\nX[1, 2] = 9\nX[0:2, :] = 12\nZ = paddle.zeros_like(Y)\nZ = X + Y\nbefore = id(X)\nX += Y\nid(X) == before\nA = X.numpy()\nB = paddle.to_tensor(A)\ntype(A), type(B)\na = paddle.to_tensor([3.5])\na, a.item(), float(a), int(a)":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport paddle\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = paddle.io.TensorDataset(data_arrays)\n return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01))\nbias_attr = paddle.ParamAttr(initializer=None)\nnet = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters())\nw = net[0].weight\nb = net[0].bias":2,"%matplotlib inline\nimport warnings\nfrom d2l 
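The shape printouts in the ResNet snippet above all follow from the usual convolution output-size formula. A small helper (a sketch, not part of the dataset) reproduces the first stage's 224 -> 112 -> 56 progression:

def conv_out(n, k, p, s):
    # floor((n + 2p - k) / s) + 1: output size of a conv or pooling window.
    return (n + 2 * p - k) // s + 1

# b1 above: 7x7 conv (stride 2, padding 3), then 3x3 max-pool (stride 2,
# padding 1) on a 224x224 input.
n = conv_out(224, k=7, p=3, s=2)  # 112
n = conv_out(n, k=3, p=1, s=2)    # 56
print(n)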
import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport sys\nimport paddle\nfrom paddle.vision import transforms\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\nmnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\nlen(mnist_train), len(mnist_test)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if paddle.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n return 4\ntrain_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers())\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\n mnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\n return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()),\n paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers()))":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = paddle.arange(16, dtype=\"float32\").reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3, stride=3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3))\npool2d(X)\nX = paddle.concat((X, X + 1), 1)\npool2d = paddle.nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)":6,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nX, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4))\nH, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4))\npaddle.matmul(X, W_xh) + paddle.matmul(H, W_hh)\npaddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))":6,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nX, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)":6,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True):\n if not is_training:\n X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5\n else:\n assert len(X.shape) 
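The matmul/concat snippet above asserts that X @ W_xh + H @ W_hh equals a single product over concatenated inputs and weights, which is why an RNN step can be fused into one matrix multiply. A NumPy verification of that identity:

import numpy as np

rng = np.random.default_rng(0)
X, W_xh = rng.normal(size=(3, 1)), rng.normal(size=(1, 4))
H, W_hh = rng.normal(size=(3, 4)), rng.normal(size=(4, 4))

lhs = X @ W_xh + H @ W_hh
rhs = np.concatenate((X, H), axis=1) @ np.concatenate((W_xh, W_hh), axis=0)
print(np.allclose(lhs, rhs))  # True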
in (2, 4)\n if len(X.shape) == 2:\n mean = paddle.mean(X)\n var = paddle.mean(((X - mean) ** 2))\n else:\n mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)\n var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / (var + eps) ** 0.5\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Layer):\n def __init__(self, num_features, num_dims=4):\n super(BatchNorm, self).__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32')))\n self.beta = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32')))\n self.moving_mean = paddle.zeros(shape=shape, dtype='float32')\n self.moving_var = paddle.zeros(shape=shape, dtype='float32')\n def forward(self, X):\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training)\n return Y\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nparam = net.parameters()\nprint('gamma:', param[2].numpy().reshape(-1))\nprint('beta:', param[3].numpy().reshape(-1))\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(),\n nn.Linear(84, 10))":4,"x = paddle.arange(12)\nX = paddle.reshape(x, (3, 4))\npaddle.zeros((2, 3, 4))\npaddle.ones((2, 3, 4))\npaddle.randn((3, 4),'float32')\npaddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = paddle.to_tensor([1.0, 2, 4, 8])\ny = paddle.to_tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x**y\npaddle.exp(x)\nX = paddle.arange(12, dtype='float32').reshape((3, 4))\nY = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\npaddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)\na = paddle.reshape(paddle.arange(3), (3, 1))\nb = paddle.reshape(paddle.arange(2), (1, 2))\nZ = paddle.zeros_like(Y)\nZ = X + Y\nA = X.numpy()\nB = paddle.to_tensor(A)\ntype(A), type(B)\na = paddle.to_tensor([3.5])\na, a.item(), float(a), int(a)":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n 
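For fully connected inputs, batch normalization standardizes each feature over the batch dimension, then rescales with gamma and shifts with beta. A NumPy check of the standardization step (per-feature statistics along axis 0, which is what the 2-D branch is meant to compute):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(3.0, 2.0, (8, 5))  # batch of 8 examples, 5 features

mean = X.mean(axis=0)
var = ((X - mean) ** 2).mean(axis=0)
X_hat = (X - mean) / np.sqrt(var + 1e-5)
print(X_hat.mean(axis=0).round(6))  # ~0 for every feature
print(X_hat.var(axis=0).round(6))   # ~1 for every feature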
nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(3, stride=2), nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)":6,"import warnings\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = paddle.rand([2, 20])\nnet(X)\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nnet = MLP()\nnet(X)\nclass MySequential(nn.Layer):\n def __init__(self, *layers):\n super(MySequential, self).__init__()\n if len(layers) > 0 and isinstance(layers[0], tuple):\n for name, layer in layers:\n self.add_sublayer(name, layer)\n else:\n for idx, layer in enumerate(layers):\n self.add_sublayer(str(idx), layer)\n def forward(self, X):\n for layer in self._sub_layers.values():\n X = layer(X)\n return X\nnet = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nnet(X)\nclass FixedHiddenMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.rand_weight = paddle.rand([20, 20])\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nnet = FixedHiddenMLP()\nnet(X)\nclass NestMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),\n nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)":2,"import paddle\nfrom paddle import nn\npaddle.device.set_device(\"cpu\"), paddle.CUDAPlace(0), paddle.CUDAPlace(1)\npaddle.device.cuda.device_count()\ndef try_gpu(i=0):\n if paddle.device.cuda.device_count() >= i + 1:\n return paddle.CUDAPlace(i)\n return paddle.CPUPlace()\ndef try_all_gpus():\n devices = [paddle.CUDAPlace(i) for i in range(paddle.device.cuda.device_count())]\n return devices if devices else paddle.CPUPlace()\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = paddle.to_tensor([1, 2, 3])\nx.place\nX = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu())\nY = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1))\nZ = X.cuda(1)\nZ.cuda(1) is Z\nnet = nn.Sequential(nn.Linear(3, 1))\nnet = net.to(try_gpu())\nnet[0].weight.place":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom IPython import display\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))\nb = paddle.zeros(shape=(num_outputs,))\nW.stop_gradient=False\nb.stop_gradient=False\nX = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = paddle.exp(X)\n partition = 
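MySequential above works because the parent class tracks sub-layers in an ordered dict that forward can iterate. A framework-free sketch of the same design (TinySequential is a hypothetical name, plain Python only):

class TinySequential:
    # Store callables in insertion order, then thread the input through
    # them one by one, exactly as MySequential does with _sub_layers.
    def __init__(self, *layers):
        self._layers = {str(i): layer for i, layer in enumerate(layers)}

    def __call__(self, X):
        for layer in self._layers.values():
            X = layer(X)
        return X

net = TinySequential(lambda x: x * 2, lambda x: x + 1)
print(net(10))  # (10 * 2) + 1 = 21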
X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = paddle.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = paddle.to_tensor([0, 2])\ny_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n if len(y_hat.shape) < len(y.shape):\n cmp = y_hat.astype(y.dtype) == y.squeeze()\n else:\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, paddle.nn.Layer):\n net.eval()\n metric = Accumulator(2)\n with paddle.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, paddle.nn.Layer):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]\nlr = 0.1\ndef updater(batch_size):\n return d2l.sgd([W, b], lr, batch_size)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])\npredict_ch3(net, test_iter)":2,"import warnings\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.to_tensor([3.0])\ny = paddle.to_tensor([2.0])\nx + y, x * y, x / y, x**y\nx = paddle.arange(4)\nA = paddle.reshape(paddle.arange(20), (5, 4))\npaddle.transpose(A, perm=[1, 0])\nB = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == paddle.transpose(B, perm=[1, 0])\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\nA = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))\nB = A.clone()\nA, A + B\na = 2\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\na + X, (a * X).shape\nx = paddle.arange(4, dtype=paddle.float32)\nprint(x, x.sum())\nA.shape, A.sum()\nA_sum_axis0 = A.sum(axis=0)\nA_sum_axis1 = A.sum(axis=1)\nA.sum(axis=[0, 1])\nA.mean(), A.sum() / A.numel()\nA.mean(axis=0), A.sum(axis=0) / A.shape[0]\nsum_A = paddle.sum(A, axis=1, keepdim=True)\nA.cumsum(axis=0)\ny = paddle.ones(shape=[4], dtype='float32')\nx, y, paddle.dot(x, y)\npaddle.sum(x * y)\nA.shape, x.shape, paddle.mv(A, x)\nB = paddle.ones(shape=[4, 3], dtype='float32')\npaddle.mm(A, B)\nu = paddle.to_tensor([3.0, -4.0])\npaddle.norm(u)\npaddle.abs(u).sum()\npaddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nclass Inception(nn.Layer):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1)\n self.p2_1 = 
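The softmax above exponentiates raw logits directly, which overflows for large inputs; the common fix is to subtract the row-wise maximum first, which leaves the result unchanged. A NumPy sketch of the stable variant plus the label-indexing trick used by cross_entropy:

import numpy as np

def stable_softmax(X):
    # exp(x - max) never overflows, and the shared constant cancels
    # in the normalizing ratio.
    Z = X - X.max(axis=1, keepdims=True)
    e = np.exp(Z)
    return e / e.sum(axis=1, keepdims=True)

y = np.array([0, 2])
y_hat = np.array([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
# Fancy indexing picks each row's predicted probability of its true label:
print(-np.log(y_hat[np.arange(len(y_hat)), y]))  # [2.3026, 0.6931]
print(stable_softmax(np.array([[1000.0, 0.0]])))  # [[1., 0.]], no overflow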
nn.Conv2D(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2D(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return paddle.concat(x=[p1, p2, p3, p4], axis=1)\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2,padding=1))\nb2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = paddle.rand(shape=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)":6,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = paddle.randn([num_inputs, num_hiddens]) * 0.01\nW1.stop_gradient = False\nb1 = paddle.zeros([num_hiddens])\nb1.stop_gradient = False\nW2 = paddle.randn([num_hiddens, num_outputs]) * 0.01\nW2.stop_gradient = False\nb2 = paddle.zeros([num_outputs])\nb2.stop_gradient = False\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = paddle.zeros_like(X)\n return paddle.maximum(X, a)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(X@W1 + b1)\n return (H@W2 + b2)\nloss = nn.CrossEntropyLoss(reduction='none')\nnum_epochs, lr = 10, 0.1\nupdater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport numpy as np\nimport paddle\nfrom paddle import nn\ntrue_w, features, poly_features, labels = [paddle.to_tensor(x, dtype=\n paddle.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n out = net(X)\n y = y.reshape(out.shape)\n l = loss(out, y)\n metric.add(l.sum(), l.numel())\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels,\n num_epochs=400):\n loss = nn.MSELoss()\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = 
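Each Inception block concatenates its four branches along the channel axis, so its output width is just the sum of the branch widths; that is how the 256, 480, 512, ... channel counts above arise. A one-function check:

def inception_out_channels(c1, c2, c3, c4):
    # Branch outputs: c1 (1x1), c2[1] (3x3), c3[1] (5x5), c4 (pool + 1x1).
    return c1 + c2[1] + c3[1] + c4

# First block of b3: Inception(192, 64, (96, 128), (16, 32), 32)
print(inception_out_channels(64, (96, 128), (16, 32), 32))  # 256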
d2l.load_array((train_features, train_labels.reshape([-1,1])), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False)\n trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2],\n labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :],\n labels[:n_train], labels[n_train:], num_epochs=1500)":4,"import warnings\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(4, dtype='float32')\nx = paddle.to_tensor(x, stop_gradient=False)\ny = 2 * paddle.dot(x, x)\ny.backward()\nx.grad\nx.grad == 4 * x\nx.clear_gradient()\ny = paddle.sum(x)\ny.backward()\nx.grad\nx.clear_gradient()\ny = x * x\npaddle.sum(y).backward()\nx.grad\nx.clear_gradient()\ny = x * x\nu = y.detach()\nz = u * x\npaddle.sum(z).backward()\nx.grad == u\nx.clear_gradient()\npaddle.sum(y).backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while paddle.norm(b) < 1000:\n b = b * 2\n if paddle.sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)\nd = f(a)\nd.backward()\na.grad == d / a":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True)\nstate = paddle.zeros(shape=[1, batch_size, num_hiddens])\nstate.shape\nX = paddle.rand(shape=[num_steps, batch_size, len(vocab)])\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\nclass RNNModel(nn.Layer):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if self.rnn.num_directions == 1:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens])\n else:\n return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]),\n paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1.0\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)":6,"import collections\nimport re\nfrom d2l import paddle as d2l\ndef tokenize(lines, token='word'):\n if token 
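The autograd snippet above relies on the identity that for y = 2 * dot(x, x) the gradient is 4x. A NumPy finite-difference check of that claim (eps chosen small but safe for float64):

import numpy as np

x = np.arange(4, dtype=np.float64)
f = lambda v: 2 * np.dot(v, v)
eps = 1e-6
# Central differences along each coordinate direction approximate the
# gradient; they should match 4 * x to high precision.
grad = np.array([(f(x + eps * e) - f(x - eps * e)) / (2 * eps)
                 for e in np.eye(4)])
print(np.allclose(grad, 4 * x))  # True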
== 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('ERROR: unknown token type: ' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)":2,"%matplotlib inline\nimport warnings\nimport numpy as np\nimport pandas as pd\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\nn_train = train_data.shape[0]\ntrain_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32)\ntest_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32)\ntrain_labels = paddle.to_tensor(\n train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32)\nloss = nn.MSELoss()\nin_features = train_features.shape[1]\ndef get_net():\n net = nn.Sequential(nn.Linear(in_features, 1))\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = paddle.clip(net(features), 1, float('inf'))\n rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n optimizer.clear_grad()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = paddle.concat([X_train, X_part], 0)\n y_train = paddle.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).detach().numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)":4,"import warnings\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nclass 
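load_corpus_time_machine above depends on a Vocab that maps tokens to integer ids by frequency. A stripped-down stand-in (standard library only; build_vocab is a hypothetical helper, not the d2l class) shows the core bookkeeping:

import collections

def build_vocab(tokens, min_freq=0, reserved=('<unk>',)):
    # Count tokens across all lines, then hand out ids by falling
    # frequency, with reserved tokens taking the smallest ids.
    counter = collections.Counter(tok for line in tokens for tok in line)
    idx_to_token = list(reserved) + [
        tok for tok, freq in counter.most_common() if freq >= min_freq]
    token_to_idx = {tok: i for i, tok in enumerate(idx_to_token)}
    return idx_to_token, token_to_idx

tokens = [list('the time machine'), list('by h g wells')]
idx_to_token, token_to_idx = build_vocab(tokens)
print(len(idx_to_token), token_to_idx['e'])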
CenteredLayer(nn.Layer):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(paddle.rand([4, 8]))\nY.mean()\nclass MyLinear(nn.Layer):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32')\n self.bias = paddle.create_parameter(shape=(units,), dtype='float32')\n def forward(self, X):\n linear = paddle.matmul(X, self.weight) + self.bias\n return F.relu(linear)\nlinear = MyLinear(5, 3)\nlinear.weight\nlinear(paddle.randn([2, 5]))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(paddle.rand([2, 64]))":4}}}}],"rows":[{"rowIdx":100,"cells":{"id":{"kind":"number","value":101,"string":"101"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"counts = multinomial.Multinomial(10, fair_probs).sample((500,))\ncum_counts = counts.cumsum(dim=0)\nestimates = cum_counts / cum_counts.sum(dim=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].numpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nimport torch\na = dir(torch.distributions)\nhelp(torch.ones)\ntorch.ones(4)"},"mxnet":{"kind":"string","value":"counts = np.random.multinomial(10, fair_probs, size=500)\ncum_counts = counts.astype(np.float32).cumsum(axis=0)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].asnumpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nfrom mxnet import np\na = dir(np.random)\nhelp(np.ones)\nnp.ones(4)"},"paddle":{"kind":"null"}}},{"rowIdx":101,"cells":{"id":{"kind":"number","value":102,"string":"102"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport torch\nfrom d2l import torch as d2l\nn = 10000\na = torch.ones(n)\nb = torch.ones(n)\nc = torch.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"},"mxnet":{"kind":"string","value":"%matplotlib inline\nimport math\nimport time\nfrom mxnet import np\nfrom d2l import mxnet as d2l\nn = 10000\na = np.ones(n)\nb = np.ones(n)\nc = np.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x.asnumpy(), [normal(x, mu, sigma).asnumpy() for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"},"paddle":{"kind":"null"}}},{"rowIdx":102,"cells":{"id":{"kind":"number","value":103,"string":"103"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport random\nimport torch\nfrom d2l import torch as d2l\ndef synthetic_data(w, b, num_examples):\n X = torch.normal(0, 1, (num_examples, len(w)))\n y = torch.matmul(X, w) + b\n y += torch.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 
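The multinomial row above estimates die probabilities from growing sample sizes. The same law-of-large-numbers effect in plain NumPy (seed arbitrary), mirroring that row's cumulative-count computation:

import numpy as np

np.random.seed(0)
fair_probs = [1.0 / 6] * 6
# 500 groups of 10 die rolls; running relative frequencies converge
# toward 1/6 ~= 0.167, which is what the row's plot shows.
counts = np.random.multinomial(10, fair_probs, size=500)
cum_counts = counts.astype(np.float64).cumsum(axis=0)
estimates = cum_counts / cum_counts.sum(axis=1, keepdims=True)
print(estimates[-1].round(3))  # all six entries near 0.167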
4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nw = torch.normal(0, 0.01, size=(2,1), requires_grad=True)\nb = torch.zeros(1, requires_grad=True)\ndef linreg(X, w, b):\n return torch.matmul(X, w) + b\ndef sgd(params, lr, batch_size):\n with torch.no_grad():\n for param in params:\n param -= lr * param.grad / batch_size\n param.grad.zero_()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with torch.no_grad():\n train_l = loss(net(features, w, b), labels)"},"mxnet":{"kind":"string","value":"%matplotlib inline\nimport random\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef synthetic_data(w, b, num_examples):\n X = np.random.normal(0, 1, (num_examples, len(w)))\n y = np.dot(X, w) + b\n y += np.random.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].asnumpy(), labels.asnumpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = np.array(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nw = np.random.normal(0, 0.01, (2, 1))\nb = np.zeros(1)\nw.attach_grad()\nb.attach_grad()\ndef linreg(X, w, b):\n return np.dot(X, w) + b\ndef sgd(params, lr, batch_size):\n for param in params:\n param[:] = param - lr * param.grad / batch_size\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n with autograd.record():\n l = loss(net(X, w, b), y)\n l.backward()\n sgd([w, b], lr, batch_size)\n train_l = loss(net(features, w, b), labels)"},"paddle":{"kind":"null"}}},{"rowIdx":103,"cells":{"id":{"kind":"number","value":104,"string":"104"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import numpy as np\nimport torch\nfrom torch.utils import data\nfrom d2l import torch as d2l\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = data.TensorDataset(*data_arrays)\n return data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom torch import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nnet[0].weight.data.normal_(0, 0.01)\nnet[0].bias.data.fill_(0)\nloss = nn.MSELoss()\ntrainer = torch.optim.SGD(net.parameters(), lr=0.03)\nw = net[0].weight.data\nb = net[0].bias.data"},"mxnet":{"kind":"string","value":"from mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef 
load_array(data_arrays, batch_size, is_train=True):\n dataset = gluon.data.ArrayDataset(*data_arrays)\n return gluon.data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom mxnet.gluon import nn\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nfrom mxnet import init\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.L2Loss()\nfrom mxnet import gluon\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})\nw = net[0].weight.data()\nb = net[0].bias.data()"},"paddle":{"kind":"null"}}},{"rowIdx":104,"cells":{"id":{"kind":"number","value":105,"string":"105"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport torch\nimport torchvision\nfrom torch.utils import data\nfrom torchvision import transforms\nfrom d2l import torch as d2l\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=True, transform=trans, download=True)\nmnist_test = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=False, transform=trans, download=True)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if torch.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(data.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape(18, 28, 28), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n return 4\ntrain_iter = data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = torchvision.datasets.FashionMNIST(root=\"../data\", train=True, transform=trans, download=True)\n mnist_test = torchvision.datasets.FashionMNIST(root=\"../data\", train=False, transform=trans, download=True)\n return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))"},"mxnet":{"kind":"string","value":"%matplotlib inline\nimport sys\nfrom mxnet import gluon\nfrom d2l import mxnet as d2l\nd2l.use_svg_display()\nmnist_train = gluon.data.vision.FashionMNIST(train=True)\nmnist_test = gluon.data.vision.FashionMNIST(train=False)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.asnumpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = mnist_train[:18]\nshow_images(X.squeeze(axis=-1), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n return 0 if sys.platform.startswith('win') else 4\ntransformer = gluon.data.vision.transforms.ToTensor()\ntrain_iter = gluon.data.DataLoader(mnist_train.transform_first(transformer), batch_size, shuffle=True, 
num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n dataset = gluon.data.vision\n trans = [dataset.transforms.ToTensor()]\n if resize:\n trans.insert(0, dataset.transforms.Resize(resize))\n trans = dataset.transforms.Compose(trans)\n mnist_train = dataset.FashionMNIST(train=True).transform_first(trans)\n mnist_test = dataset.FashionMNIST(train=False).transform_first(trans)\n return (gluon.data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n gluon.data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))"},"paddle":{"kind":"null"}}},{"rowIdx":105,"cells":{"id":{"kind":"number","value":106,"string":"106"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom IPython import display\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True)\nb = torch.zeros(num_outputs, requires_grad=True)\nX = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = torch.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = torch.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = torch.tensor([0, 2])\ny_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - torch.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.type(y.dtype) == y\n return float(cmp.type(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, torch.nn.Module):\n net.eval()\n metric = Accumulator(2)\n with torch.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, torch.nn.Module):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, torch.optim.Optimizer):\n updater.zero_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]"},"mxnet":{"kind":"string","value":"from IPython import display\nfrom mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = np.random.normal(0, 0.01, (num_inputs, num_outputs))\nb = np.zeros(num_outputs)\nW.attach_grad()\nb.attach_grad()\nX = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdims=True), X.sum(1, keepdims=True)\ndef softmax(X):\n X_exp = np.exp(X)\n partition = X_exp.sum(1, keepdims=True)\n return X_exp / partition\nX = np.random.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(np.dot(X.reshape((-1, W.shape[0])), W) + b)\ny = np.array([0, 2])\ny_hat = np.array([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - np.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, 
y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n metric = Accumulator(2)\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n metric = Accumulator(3)\n if isinstance(updater, gluon.Trainer):\n updater = updater.step\n for X, y in train_iter:\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.size)\n return metric[0] / metric[2], metric[1] / metric[2]"},"paddle":{"kind":"null"}}},{"rowIdx":106,"cells":{"id":{"kind":"number","value":107,"string":"107"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = torch.optim.SGD(net.parameters(), lr=0.1)"},"mxnet":{"kind":"string","value":"from mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential()\nnet.add(nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})"},"paddle":{"kind":"null"}}},{"rowIdx":107,"cells":{"id":{"kind":"number","value":108,"string":"108"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.relu(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(torch.ones_like(x), retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\ny = torch.sigmoid(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = torch.tanh(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))"},"mxnet":{"kind":"string","value":"%matplotlib inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.relu(x)\nd2l.plot(x, y, 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\nwith autograd.record():\n y = npx.sigmoid(x)\nd2l.plot(x, y, 'x', 'sigmoid(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\nwith autograd.record():\n y = np.tanh(x)\nd2l.plot(x, y, 'x', 'tanh(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))"},"paddle":{"kind":"null"}}},{"rowIdx":108,"cells":{"id":{"kind":"number","value":109,"string":"109"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import 
torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = nn.Parameter(torch.randn(\n num_inputs, num_hiddens, requires_grad=True) * 0.01)\nb1 = nn.Parameter(torch.zeros(num_hiddens, requires_grad=True))\nW2 = nn.Parameter(torch.randn(\n num_hiddens, num_outputs, requires_grad=True) * 0.01)\nb2 = nn.Parameter(torch.zeros(num_outputs, requires_grad=True))\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = torch.zeros_like(X)\n return torch.max(X, a)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(X@W1 + b1)\n return (H@W2 + b2)\nloss = nn.CrossEntropyLoss(reduction='none')\nnum_epochs, lr = 10, 0.1\nupdater = torch.optim.SGD(params, lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)"},"mxnet":{"kind":"string","value":"from mxnet import gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens))\nb1 = np.zeros(num_hiddens)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens, num_outputs))\nb2 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2]\nfor param in params:\n param.attach_grad()\ndef relu(X):\n return np.maximum(X, 0)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(np.dot(X, W1) + b1)\n return np.dot(H, W2) + b2\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\nnum_epochs, lr = 10, 0.1\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))"},"paddle":{"kind":"null"}}},{"rowIdx":109,"cells":{"id":{"kind":"number","value":110,"string":"110"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"},"mxnet":{"kind":"string","value":"from mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'), nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"},"paddle":{"kind":"null"}}},{"rowIdx":110,"cells":{"id":{"kind":"number","value":111,"string":"111"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import math\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\ntrue_w, features, poly_features, labels = [torch.tensor(x, dtype=torch.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n out = net(X)\n y = 
y.reshape(out.shape)\n l = loss(out, y)\n metric.add(l.sum(), l.numel())\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = nn.MSELoss(reduction='none')\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels.reshape(-1,1)), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape(-1,1)), batch_size, is_train=False)\n trainer = torch.optim.SGD(net.parameters(), lr=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))"},"mxnet":{"kind":"string","value":"import math\nfrom mxnet import gluon, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(l.sum(), d2l.size(l))\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = gluon.loss.L2Loss()\n net = nn.Sequential()\n net.add(nn.Dense(1, use_bias=False))\n net.initialize()\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))"},"paddle":{"kind":"null"}}},{"rowIdx":111,"cells":{"id":{"kind":"number","value":112,"string":"112"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = torch.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = torch.normal(0, 1, size=(num_inputs, 1), requires_grad=True)\n b = torch.zeros(1, requires_grad=True)\n return [w, b]\ndef l2_penalty(w):\n return torch.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), 
d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential(nn.Linear(num_inputs, 1))\n for param in net.parameters():\n param.data.normal_()\n loss = nn.MSELoss(reduction='none')\n num_epochs, lr = 100, 0.003\n trainer = torch.optim.SGD([{\"params\":net[0].weight,'weight_decay': wd}, {\"params\":net[0].bias}], lr=lr)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.mean().backward()\n trainer.step()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1,\n (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"},"mxnet":{"kind":"string","value":"%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = np.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = np.random.normal(scale=1, size=(num_inputs, 1))\n b = np.zeros(1)\n w.attach_grad()\n b.attach_grad()\n return [w, b]\ndef l2_penalty(w):\n return (w**2).sum() / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential()\n net.add(nn.Dense(1))\n net.initialize(init.Normal(sigma=1))\n loss = gluon.loss.L2Loss()\n num_epochs, lr = 100, 0.003\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr, 'wd': wd})\n net.collect_params('.*bias').setattr('wd_mult', 0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"},"paddle":{"kind":"null"}}},{"rowIdx":112,"cells":{"id":{"kind":"number","value":113,"string":"113"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return torch.zeros_like(X)\n if dropout == 0:\n return X\n mask = (torch.rand(X.shape) > dropout).float()\n return mask * X / (1.0 - dropout)\nX= torch.arange(16, dtype = torch.float32).reshape((2, 8))\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Module):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2, is_training = True):\n super(Net, self).__init__()\n self.num_inputs = 
num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"},"mxnet":{"kind":"string","value":"from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return np.zeros_like(X)\n if dropout == 0:\n return X\n mask = np.random.uniform(0, 1, X.shape) > dropout\n return mask.astype(np.float32) * X / (1.0 - dropout)\nX = np.arange(16).reshape(2, 8)\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens1))\nb1 = np.zeros(num_hiddens1)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens1, num_hiddens2))\nb2 = np.zeros(num_hiddens2)\nW3 = np.random.normal(scale=0.01, size=(num_hiddens2, num_outputs))\nb3 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2, W3, b3]\nfor param in params:\n param.attach_grad()\ndropout1, dropout2 = 0.2, 0.5\ndef net(X):\n X = X.reshape(-1, num_inputs)\n H1 = npx.relu(np.dot(X, W1) + b1)\n if autograd.is_training():\n H1 = dropout_layer(H1, dropout1)\n H2 = npx.relu(np.dot(H1, W2) + b2)\n if autograd.is_training():\n H2 = dropout_layer(H2, dropout2)\n return np.dot(H2, W3) + b3\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout1),\n nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout2),\n nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"},"paddle":{"kind":"null"}}},{"rowIdx":113,"cells":{"id":{"kind":"number","value":114,"string":"114"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"trainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.sigmoid(x)\ny.backward(torch.ones_like(x))\nd2l.plot(x.detach().numpy(), 
[y.detach().numpy(), x.grad.numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = torch.normal(0, 1, size=(4,4))\nfor i in range(100):\n M = torch.mm(M,torch.normal(0, 1, size=(4, 4)))"},"mxnet":{"kind":"string","value":"trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.sigmoid(x)\ny.backward()\nd2l.plot(x, [y, x.grad], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = np.random.normal(size=(4, 4))\nfor i in range(100):\n M = np.dot(M, np.random.normal(size=(4, 4)))"},"paddle":{"kind":"null"}}},{"rowIdx":114,"cells":{"id":{"kind":"number","value":115,"string":"115"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train = train_data.shape[0]\ntrain_features = torch.tensor(all_features[:n_train].values, dtype=torch.float32)\ntest_features = torch.tensor(all_features[n_train:].values, dtype=torch.float32)\ntrain_labels = torch.tensor(train_data.SalePrice.values.reshape(-1, 1), dtype=torch.float32)\nloss = nn.MSELoss()\nin_features = train_features.shape[1]\ndef get_net():\n net = nn.Sequential(nn.Linear(in_features,1))\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = torch.clamp(net(features), 1, float('inf'))\n rmse = torch.sqrt(loss(torch.log(clipped_preds), torch.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = torch.optim.Adam(net.parameters(), lr = learning_rate, weight_decay = weight_decay)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n optimizer.zero_grad()\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = torch.cat([X_train, X_part], 0)\n y_train = torch.cat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).detach().numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)"},"mxnet":{"kind":"string","value":"%matplotlib inline\nimport pandas as pd\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon 
import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train = train_data.shape[0]\ntrain_features = np.array(all_features[:n_train].values, dtype=np.float32)\ntest_features = np.array(all_features[n_train:].values, dtype=np.float32)\ntrain_labels = np.array(train_data.SalePrice.values.reshape(-1, 1), dtype=np.float32)\nloss = gluon.loss.L2Loss()\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(1))\n net.initialize()\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = np.clip(net(features), 1, float('inf'))\n return np.sqrt(2 * loss(np.log(clipped_preds), np.log(labels)).mean())\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': learning_rate, 'wd': weight_decay})\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = np.concatenate([X_train, X_part], 0)\n y_train = np.concatenate([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).asnumpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)"},"paddle":{"kind":"null"}}},{"rowIdx":115,"cells":{"id":{"kind":"number","value":116,"string":"116"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = torch.rand(2, 20)\nnet(X)\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nnet = MLP()\nnet(X)\nclass MySequential(nn.Module):\n def __init__(self, *args):\n super().__init__()\n for idx, module in enumerate(args):\n self._modules[str(idx)] = module\n def forward(self, X):\n for block in self._modules.values():\n X = block(X)\n return X\nnet = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nnet(X)\nclass FixedHiddenMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.rand_weight = torch.rand((20, 20), requires_grad=False)\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(torch.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while 
X.abs().sum() > 1:\n X /= 2\n return X.sum()\nnet = FixedHiddenMLP()\nnet(X)\nclass NestMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nnet(X)\nclass MLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.hidden = nn.Dense(256, activation='relu')\n self.out = nn.Dense(10)\n def forward(self, X):\n return self.out(self.hidden(X))\nnet = MLP()\nnet.initialize()\nnet(X)\nclass MySequential(nn.Block):\n def add(self, block):\n\n self._children[block.name] = block\n def forward(self, X):\n for block in self._children.values():\n X = block(X)\n return X\nnet = MySequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nnet(X)\nclass FixedHiddenMLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.rand_weight = self.params.get_constant('rand_weight', np.random.uniform(size=(20, 20)))\n self.dense = nn.Dense(20, activation='relu')\n def forward(self, X):\n X = self.dense(X)\n X = npx.relu(np.dot(X, self.rand_weight.data()) + 1)\n X = self.dense(X)\n while np.abs(X).sum() > 1:\n X /= 2\n return X.sum()\nnet = FixedHiddenMLP()\nnet.initialize()\nnet(X)\nclass NestMLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.net = nn.Sequential()\n self.net.add(nn.Dense(64, activation='relu'), nn.Dense(32, activation='relu'))\n self.dense = nn.Dense(16, activation='relu')\n def forward(self, X):\n return self.dense(self.net(X))\nchimera = nn.Sequential()\nchimera.add(NestMLP(), nn.Dense(20), FixedHiddenMLP())\nchimera.initialize()\nchimera(X)"},"paddle":{"kind":"null"}}},{"rowIdx":116,"cells":{"id":{"kind":"number","value":117,"string":"117"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = torch.rand(size=(2, 4))\nnet(X)\nnet.state_dict()['2.bias'].data\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_module(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, mean=0, std=0.01)\n nn.init.zeros_(m.bias)\nnet.apply(init_normal)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_constant(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 1)\n nn.init.zeros_(m.bias)\nnet.apply(init_constant)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_xavier(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 42)\nnet[0].apply(init_xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n nn.init.uniform_(m.weight, -10, 10)\n m.weight.data *= m.weight.data.abs() >= 5\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.data[:] += 1\nnet[0].weight.data[0, 0] = 42\nnet[0].weight.data[0]\nlayer = 
CenteredLayer()\nlayer(torch.FloatTensor([1, 2, 3, 4, 5]))\nnet = nn.Sequential(nn.Linear(8, 128), CenteredLayer())"},"mxnet":{"kind":"string","value":"from mxnet import init, np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(8, activation='relu'))\nnet.add(nn.Dense(1))\nnet.initialize()\nX = np.random.uniform(size=(2, 4))\nnet(X)\nnet.collect_params()['dense1_bias'].data()\ndef block1():\n net = nn.Sequential()\n net.add(nn.Dense(32, activation='relu'))\n net.add(nn.Dense(16, activation='relu'))\n return net\ndef block2():\n net = nn.Sequential()\n for _ in range(4):\n net.add(block1())\n return net\nrgnet = nn.Sequential()\nrgnet.add(block2())\nrgnet.add(nn.Dense(10))\nrgnet.initialize()\nrgnet(X)\nnet.initialize(init=init.Normal(sigma=0.01), force_reinit=True)\nnet[0].weight.data()[0]\nnet.initialize(init=init.Constant(1), force_reinit=True)\nnet[0].weight.data()[0]\nnet[0].weight.initialize(init=init.Xavier(), force_reinit=True)\nnet[1].initialize(init=init.Constant(42), force_reinit=True)\nclass MyInit(init.Initializer):\n def _init_weight(self, name, data):\n data[:] = np.random.uniform(-10, 10, data.shape)\n data *= np.abs(data) >= 5\nnet.initialize(MyInit(), force_reinit=True)\nnet[0].weight.data()[:2]\nnet[0].weight.data()[:] += 1\nnet[0].weight.data()[0, 0] = 42\nnet[0].weight.data()[0]\nlayer = CenteredLayer()\nlayer(np.array([1, 2, 3, 4, 5]))\nnet = nn.Sequential()\nnet.add(nn.Dense(128), CenteredLayer())\nnet.initialize()"},"paddle":{"kind":"null"}}},{"rowIdx":117,"cells":{"id":{"kind":"number","value":118,"string":"118"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nimport torch.nn.functional as F\nfrom torch import nn\nclass CenteredLayer(nn.Module):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(torch.rand(4, 8))\nY.mean()\nclass MyLinear(nn.Module):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = nn.Parameter(torch.randn(in_units, units))\n self.bias = nn.Parameter(torch.randn(units,))\n def forward(self, X):\n linear = torch.matmul(X, self.weight.data) + self.bias.data\n return F.relu(linear)\nlinear = MyLinear(5, 3)\nlinear.weight\nlinear(torch.rand(2, 5))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(torch.rand(2, 64))"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nclass CenteredLayer(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n def forward(self, X):\n return X - X.mean()\nY = net(np.random.uniform(size=(4, 8)))\nY.mean()\nclass MyDense(nn.Block):\n def __init__(self, units, in_units, **kwargs):\n super().__init__(**kwargs)\n self.weight = self.params.get('weight', shape=(in_units, units))\n self.bias = self.params.get('bias', shape=(units,))\n def forward(self, x):\n linear = np.dot(x, self.weight.data(ctx=x.ctx)) + self.bias.data(\n ctx=x.ctx)\n return npx.relu(linear)\ndense = MyDense(units=3, in_units=5)\ndense.params\ndense.initialize()\ndense(np.random.uniform(size=(2, 5)))\nnet = nn.Sequential()\nnet.add(MyDense(8, in_units=64), MyDense(1, in_units=8))\nnet.initialize()\nnet(np.random.uniform(size=(2, 64)))"},"paddle":{"kind":"null"}}},{"rowIdx":118,"cells":{"id":{"kind":"number","value":119,"string":"119"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nx = torch.arange(4)\ntorch.save(x, 'x-file')\nx2 = 
torch.load('x-file')\ny = torch.zeros(4)\ntorch.save([x, y],'x-files')\nx2, y2 = torch.load('x-files')\nmydict = {'x': x, 'y': y}\ntorch.save(mydict, 'mydict')\nmydict2 = torch.load('mydict')\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = torch.randn(size=(2, 20))\nY = net(X)\ntorch.save(net.state_dict(), 'mlp.params')\nclone = MLP()\nclone.load_state_dict(torch.load('mlp.params'))\nclone.eval()"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nx = np.arange(4)\nnpx.save('x-file', x)\nx2 = npx.load('x-file')\ny = np.zeros(4)\nnpx.save('x-files', [x, y])\nx2, y2 = npx.load('x-files')\nmydict = {'x': x, 'y': y}\nnpx.save('mydict', mydict)\nmydict2 = npx.load('mydict')\nclass MLP(nn.Block):\n def __init__(self, **kwargs):\n super(MLP, self).__init__(**kwargs)\n self.hidden = nn.Dense(256, activation='relu')\n self.output = nn.Dense(10)\n def forward(self, x):\n return self.output(self.hidden(x))\nnet = MLP()\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nY = net(X)\nnet.save_parameters('mlp.params')\nclone = MLP()\nclone.load_parameters('mlp.params')"},"paddle":{"kind":"null"}}},{"rowIdx":119,"cells":{"id":{"kind":"number","value":120,"string":"120"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\ntorch.device('cpu'), torch.device('cuda'), torch.device('cuda:1')\ntorch.cuda.device_count()\ndef try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return torch.device('cpu')\ndef try_all_gpus():\n devices = [torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())]\n return devices if devices else [torch.device('cpu')]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = torch.tensor([1, 2, 3])\nx.device\nX = torch.ones(2, 3, device=try_gpu())\nY = torch.rand(2, 3, device=try_gpu(1))\nZ = X.cuda(1)\nZ.cuda(1) is Z\nnet = nn.Sequential(nn.Linear(3, 1))\nnet = net.to(device=try_gpu())\nnet[0].weight.data.device"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnpx.cpu(), npx.gpu(), npx.gpu(1)\nnpx.num_gpus()\ndef try_gpu(i=0):\n return npx.gpu(i) if npx.num_gpus() >= i + 1 else npx.cpu()\ndef try_all_gpus():\n devices = [npx.gpu(i) for i in range(npx.num_gpus())]\n return devices if devices else [npx.cpu()]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = np.array([1, 2, 3])\nx.ctx\nX = np.ones((2, 3), ctx=try_gpu())\nY = np.random.uniform(size=(2, 3), ctx=try_gpu(1))\nZ = X.copyto(try_gpu(1))\nZ.as_in_ctx(try_gpu(1)) is Z\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nnet.initialize(ctx=try_gpu())\nnet[0].weight.data().ctx"},"paddle":{"kind":"null"}}},{"rowIdx":120,"cells":{"id":{"kind":"number","value":121,"string":"121"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef corr2d(X, K):\n h, w = K.shape\n Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = torch.tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Module):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = 
nn.Parameter(torch.rand(kernel_size))\n self.bias = nn.Parameter(torch.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = torch.ones((6, 8))\nX[:, 2:6] = 0\nK = torch.tensor([[1.0, -1.0]])\nY = corr2d(X, K)\ncorr2d(X.t(), K)\nconv2d = nn.Conv2d(1,1, kernel_size=(1, 2), bias=False)\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.zero_grad()\n l.sum().backward()\n conv2d.weight.data[:] -= lr * conv2d.weight.grad\nconv2d.weight.data.reshape((1, 2))"},"mxnet":{"kind":"string","value":"from mxnet import autograd, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d(X, K):\n h, w = K.shape\n Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = np.array([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Block):\n def __init__(self, kernel_size, **kwargs):\n super().__init__(**kwargs)\n self.weight = self.params.get('weight', shape=kernel_size)\n self.bias = self.params.get('bias', shape=(1,))\n def forward(self, x):\n return corr2d(x, self.weight.data()) + self.bias.data()\nX = np.ones((6, 8))\nX[:, 2:6] = 0\nK = np.array([[1.0, -1.0]])\nY = corr2d(X, K)\ncorr2d(d2l.transpose(X), K)\nconv2d = nn.Conv2D(1, kernel_size=(1, 2), use_bias=False)\nconv2d.initialize()\nX = X.reshape(1, 1, 6, 8)\nY = Y.reshape(1, 1, 6, 7)\nlr = 3e-2\nfor i in range(10):\n with autograd.record():\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n l.backward()\n conv2d.weight.data()[:] -= lr * conv2d.weight.grad()\nconv2d.weight.data().reshape((1, 2))"},"paddle":{"kind":"null"}}},{"rowIdx":121,"cells":{"id":{"kind":"number","value":122,"string":"122"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\ndef comp_conv2d(conv2d, X):\n X = X.reshape((1, 1) + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1)\nX = torch.rand(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\ndef comp_conv2d(conv2d, X):\n conv2d.initialize()\n X = X.reshape((1, 1) + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1)\nX = np.random.uniform(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1, strides=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=(3, 5), padding=(0, 1), strides=(3, 4))\ncomp_conv2d(conv2d, X).shape"},"paddle":{"kind":"null"}}},{"rowIdx":122,"cells":{"id":{"kind":"number","value":123,"string":"123"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom d2l import torch as d2l\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = 
torch.tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return torch.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = torch.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = torch.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = torch.normal(0, 1, (3, 3, 3))\nK = torch.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(torch.abs(Y1 - Y2).sum()) < 1e-6"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = np.array([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return np.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = np.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = np.dot(K, X)\n return Y.reshape((c_o, h, w))\nX = np.random.normal(0, 1, (3, 3, 3))\nK = np.random.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(np.abs(Y1 - Y2).sum()) < 1e-6"},"paddle":{"kind":"null"}}},{"rowIdx":123,"cells":{"id":{"kind":"number","value":124,"string":"124"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2d(3)\npool2d(X)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2d((2, 3), stride=(2, 3), padding=(0, 1))\npool2d(X)\nX = torch.cat((X, X + 1), 1)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = np.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), strides=(2, 3))\npool2d(X)\nX = np.concatenate((X, X + 1), 1)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)"},"paddle":{"kind":"null"}}},{"rowIdx":124,"cells":{"id":{"kind":"number","value":125,"string":"125"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch 
import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape: \t',X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2d:\n nn.init.xavier_uniform_(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = torch.optim.SGD(net.parameters(), lr=lr)\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.zero_grad()\n X, y = X.to(device), y.to(device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with torch.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))"},"mxnet":{"kind":"string","value":"from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Conv2D(channels=6, kernel_size=5, padding=2, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120, activation='sigmoid'),\n nn.Dense(84, activation='sigmoid'),\n nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 28, 28))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n net.initialize(force_reinit=True, ctx=device, init=init.Xavier())\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n trainer.step(X.shape[0])\n metric.add(l.sum(), d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, 
test_acc))"},"paddle":{"kind":"null"}}},{"rowIdx":125,"cells":{"id":{"kind":"number","value":126,"string":"126"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = torch.randn(1, 1, 224, 224)\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(\n nn.Conv2D(96, kernel_size=11, strides=4, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(256, kernel_size=5, padding=2, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)"},"paddle":{"kind":"null"}}},{"rowIdx":126,"cells":{"id":{"kind":"number","value":127,"string":"127"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2d(kernel_size=2,stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(\n *conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 10))\nconv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))\nnet = vgg(conv_arch)\nX = torch.randn(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef vgg_block(num_convs, num_channels):\n blk = nn.Sequential()\n for _ in range(num_convs):\n blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1, activation='relu'))\n blk.add(nn.MaxPool2D(pool_size=2, strides=2))\n return blk\ndef vgg(conv_arch):\n net = nn.Sequential()\n for (num_convs, num_channels) in conv_arch:\n net.add(vgg_block(num_convs, num_channels))\n net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), 
nn.Dropout(0.5),\n nn.Dense(10))\n return net\nconv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))\nnet = vgg(conv_arch)\nnet.initialize()\nX = np.random.uniform(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.name, 'output shape:\t', X.shape)"},"paddle":{"kind":"null"}}},{"rowIdx":127,"cells":{"id":{"kind":"number","value":128,"string":"128"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(),\n nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2d(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2d(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2d(3, stride=2),\n nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten())\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef nin_block(num_channels, kernel_size, strides, padding):\n blk = nn.Sequential()\n blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, activation='relu'))\n return blk\nnet = nn.Sequential()\nnet.add(nin_block(96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Dropout(0.5),\n nin_block(10, kernel_size=3, strides=1, padding=1),\n nn.GlobalAvgPool2D(),\n nn.Flatten())\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)"},"paddle":{"kind":"null"}}},{"rowIdx":128,"cells":{"id":{"kind":"number","value":129,"string":"129"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass Inception(nn.Module):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return torch.cat((p1, p2, p3, p4), dim=1)\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb2 = nn.Sequential(nn.Conv2d(64, 64, 
kernel_size=1),\n nn.ReLU(),\n nn.Conv2d(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = torch.rand(size=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Inception(nn.Block):\n def __init__(self, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu')\n self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu')\n self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1, activation='relu')\n self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu')\n self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2, activation='relu')\n self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1)\n self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu')\n def forward(self, x):\n p1 = self.p1_1(x)\n p2 = self.p2_2(self.p2_1(x))\n p3 = self.p3_2(self.p3_1(x))\n p4 = self.p4_2(self.p4_1(x))\n return np.concatenate((p1, p2, p3, p4), axis=1)\nb1 = nn.Sequential()\nb1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb2 = nn.Sequential()\nb2.add(nn.Conv2D(64, kernel_size=1, activation='relu'),\n nn.Conv2D(192, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb3 = nn.Sequential()\nb3.add(Inception(64, (96, 128), (16, 32), 32),\n Inception(128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb4 = nn.Sequential()\nb4.add(Inception(192, (96, 208), (16, 48), 64),\n Inception(160, (112, 224), (24, 64), 64),\n Inception(128, (128, 256), (24, 64), 64),\n Inception(112, (144, 288), (32, 64), 64),\n Inception(256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb5 = nn.Sequential()\nb5.add(Inception(256, (160, 320), (32, 128), 128),\n Inception(384, (192, 384), (48, 128), 128),\n nn.GlobalAvgPool2D())\nnet = nn.Sequential()\nnet.add(b1, b2, b3, b4, b5, nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 96, 96))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"},"paddle":{"kind":"null"}}},{"rowIdx":129,"cells":{"id":{"kind":"number","value":130,"string":"130"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not torch.is_grad_enabled():\n X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = 
X.mean(dim=0)\n var = ((X - mean) ** 2).mean(dim=0)\n else:\n mean = X.mean(dim=(0, 2, 3), keepdim=True)\n var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / torch.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean.data, moving_var.data\nclass BatchNorm(nn.Module):\n def __init__(self, num_features, num_dims):\n super().__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = nn.Parameter(torch.ones(shape))\n self.beta = nn.Parameter(torch.zeros(shape))\n self.moving_mean = torch.zeros(shape)\n self.moving_var = torch.ones(shape)\n def forward(self, X):\n if self.moving_mean.device != X.device:\n self.moving_mean = self.moving_mean.to(X.device)\n self.moving_var = self.moving_var.to(X.device)\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9)\n return Y\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(16*4*4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nnet[1].gamma.reshape((-1,)), net[1].beta.reshape((-1,))\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(),\n nn.Linear(84, 10))"},"mxnet":{"kind":"string","value":"from mxnet import autograd, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not autograd.is_training():\n X_hat = (X - moving_mean) / np.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(axis=0)\n var = ((X - mean) ** 2).mean(axis=0)\n else:\n mean = X.mean(axis=(0, 2, 3), keepdims=True)\n var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)\n X_hat = (X - mean) / np.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Block):\n def __init__(self, num_features, num_dims, **kwargs):\n super().__init__(**kwargs)\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.params.get('gamma', shape=shape, init=init.One())\n self.beta = self.params.get('beta', shape=shape, init=init.Zero())\n self.moving_mean = np.zeros(shape)\n self.moving_var = np.ones(shape)\n def forward(self, X):\n if self.moving_mean.ctx != X.ctx:\n self.moving_mean = self.moving_mean.copyto(X.ctx)\n self.moving_var = self.moving_var.copyto(X.ctx)\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma.data(), self.beta.data(), self.moving_mean,\n self.moving_var, eps=1e-12, momentum=0.9)\n return Y\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n 
BatchNorm(6, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n BatchNorm(16, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n BatchNorm(120, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n BatchNorm(84, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(10))\nnet[1].gamma.data().reshape(-1,), net[1].beta.data().reshape(-1,)\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(10))"},"paddle":{"kind":"null"}}},{"rowIdx":130,"cells":{"id":{"kind":"number","value":131,"string":"131"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass Residual(nn.Module):\n def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2d(num_channels)\n self.bn2 = nn.BatchNorm2d(num_channels)\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3,3)\nX = torch.rand(4, 3, 6, 6)\nY = blk(X)\nY.shape\nblk = Residual(3,6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\ndef resnet_block(input_channels, num_channels, num_residuals, first_block=False):\n blk = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))\n else:\n blk.append(Residual(num_channels, num_channels))\n return blk\nb2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))\nb3 = nn.Sequential(*resnet_block(64, 128, 2))\nb4 = nn.Sequential(*resnet_block(128, 256, 2))\nb5 = nn.Sequential(*resnet_block(256, 512, 2))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Residual(nn.Block):\n def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):\n super().__init__(**kwargs)\n self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides)\n self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm()\n self.bn2 = nn.BatchNorm()\n def forward(self, X):\n Y = npx.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if 
self.conv3:\n X = self.conv3(X)\n return npx.relu(Y + X)\nblk = Residual(3)\nblk.initialize()\nX = np.random.uniform(size=(4, 3, 6, 6))\nblk(X).shape\nblk = Residual(6, use_1x1conv=True, strides=2)\nblk.initialize()\nblk(X).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\ndef resnet_block(num_channels, num_residuals, first_block=False):\n blk = nn.Sequential()\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.add(Residual(num_channels, use_1x1conv=True, strides=2))\n else:\n blk.add(Residual(num_channels))\n return blk\nnet.add(resnet_block(64, 2, first_block=True),\n resnet_block(128, 2),\n resnet_block(256, 2),\n resnet_block(512, 2))\nnet.add(nn.GlobalAvgPool2D(), nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"},"paddle":{"kind":"null"}}},{"rowIdx":131,"cells":{"id":{"kind":"number","value":132,"string":"132"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Module):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = torch.cat((X, Y), dim=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = torch.randn(4, 3, 8, 8)\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2d(kernel_size=2, stride=2))\nblk = transition_block(23, 10)\nblk(Y).shape\nb1 = nn.Sequential(\n nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nblks = []\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n blks.append(DenseBlock(num_convs, num_channels, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n blks.append(transition_block(num_channels, num_channels // 2))\n num_channels = num_channels // 2\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2d(num_channels), nn.ReLU(),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef conv_block(num_channels):\n blk = nn.Sequential()\n blk.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=3, padding=1))\n return blk\nclass DenseBlock(nn.Block):\n def __init__(self, num_convs, num_channels, **kwargs):\n super().__init__(**kwargs)\n self.net = nn.Sequential()\n for _ in range(num_convs):\n self.net.add(conv_block(num_channels))\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = np.concatenate((X, Y), axis=1)\n return X\nblk = DenseBlock(2, 10)\nblk.initialize()\nX = 
np.random.uniform(size=(4, 3, 8, 8))\nY = blk(X)\nY.shape\ndef transition_block(num_channels):\n blk = nn.Sequential()\n blk.add(nn.BatchNorm(), nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=1),\n nn.AvgPool2D(pool_size=2, strides=2))\n return blk\nblk = transition_block(10)\nblk.initialize()\nblk(Y).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n net.add(DenseBlock(num_convs, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n num_channels //= 2\n net.add(transition_block(num_channels))\nnet.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.GlobalAvgPool2D(),\n nn.Dense(10))"},"paddle":{"kind":"null"}}},{"rowIdx":132,"cells":{"id":{"kind":"number","value":133,"string":"133"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nT = 1000\ntime = torch.arange(1, T + 1, dtype=torch.float32)\nx = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = torch.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = torch.optim.Adam(net.parameters(), lr)\n for epoch in range(epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = torch.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = torch.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"},"mxnet":{"kind":"string","value":"%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nT = 1000\ntime = np.arange(1, T + 1, 
dtype=np.float32)\nx = np.sin(0.01 * time) + np.random.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = np.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(10, activation='relu'),\n nn.Dense(1))\n net.initialize(init.Xavier())\n return net\nloss = gluon.loss.L2Loss()\ndef train(net, train_iter, loss, epochs, lr):\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr})\n for epoch in range(epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.asnumpy(), onestep_preds.asnumpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = np.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.asnumpy(), onestep_preds.asnumpy(),\n multistep_preds[n_train + tau:].asnumpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = np.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].asnumpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"},"paddle":{"kind":"null"}}},{"rowIdx":133,"cells":{"id":{"kind":"number","value":134,"string":"134"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import collections\nimport re\nfrom d2l import torch as d2l"},"mxnet":{"kind":"string","value":"import collections\nimport re\nfrom d2l import mxnet as d2l"},"paddle":{"kind":"null"}}},{"rowIdx":134,"cells":{"id":{"kind":"number","value":135,"string":"135"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import random\nimport torch\nfrom d2l import torch as d2l\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield torch.tensor(X), torch.tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = 
torch.tensor(corpus[offset: offset + num_tokens])\n Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"},"mxnet":{"kind":"string","value":"import random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield np.array(X), np.array(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = np.array(corpus[offset: offset + num_tokens])\n Ys = np.array(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"},"paddle":{"kind":"null"}}},{"rowIdx":135,"cells":{"id":{"kind":"number","value":136,"string":"136"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom d2l import torch as d2l\nX, W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4))\nH, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4))\ntorch.matmul(X, W_xh) + torch.matmul(H, W_hh)\ntorch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nX, W_xh = np.random.normal(0, 1, (3, 1)), np.random.normal(0, 1, (1, 4))\nH, W_hh = np.random.normal(0, 1, (3, 4)), np.random.normal(0, 1, (4, 4))\nnp.dot(X, W_xh) + np.dot(H, W_hh)\nnp.dot(np.concatenate((X, H), 1), np.concatenate((W_xh, W_hh), 0))"},"paddle":{"kind":"null"}}},{"rowIdx":136,"cells":{"id":{"kind":"number","value":137,"string":"137"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport math\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nF.one_hot(torch.tensor([0, 2]), len(vocab))\nX = torch.arange(10).reshape((2, 5))\nF.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device) * 0.01\n W_xh = normal((num_inputs, num_hiddens))\n W_hh = normal((num_hiddens, num_hiddens))\n b_h = torch.zeros(num_hiddens, device=device)\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in 
params:\n param.requires_grad_(True)\n return params\ndef init_rnn_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h)\n Y = torch.mm(H, W_hq) + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens, device)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = F.one_hot(X.T, self.vocab_size).type(torch.float32)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size, device):\n return self.init_state(batch_size, self.num_hiddens, device)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0], d2l.try_gpu())\nY, new_state = net(X.to(d2l.try_gpu()), state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1, device=device)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.argmax(dim=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, nn.Module):\n params = [p for p in net.parameters() if p.requires_grad]\n else:\n params = net.params\n norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))\n if norm > theta:\n for param in params:\n param.grad[:] *= theta / norm\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], device=device)\n else:\n if isinstance(net, nn.Module) and not isinstance(state, tuple):\n state.detach_()\n else:\n for s in state:\n s.detach_()\n y = Y.T.reshape(-1)\n X, y = X.to(device), y.to(device)\n y_hat, state = net(X, state)\n l = loss(y_hat, y.long()).mean()\n if isinstance(updater, torch.optim.Optimizer):\n updater.zero_grad()\n l.backward()\n grad_clipping(net, 1)\n updater.step()\n else:\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * y.numel(), y.numel())\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, nn.Module):\n updater = torch.optim.SGD(net.parameters(), lr)\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])"},"mxnet":{"kind":"string","value":"%matplotlib 
inline\nimport math\nfrom mxnet import autograd, gluon, init, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnpx.one_hot(np.array([0, 2]), len(vocab))\nX = np.arange(10).reshape((2, 5))\nnpx.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n W_xh = normal((num_inputs, num_hiddens))\n W_hh = normal((num_hiddens, num_hiddens))\n b_h = np.zeros(num_hiddens, ctx=device)\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_rnn_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = np.tanh(np.dot(X, W_xh) + np.dot(H, W_hh) + b_h)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens, device)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = npx.one_hot(X.T, self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size, ctx):\n return self.init_state(batch_size, self.num_hiddens, ctx)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0], d2l.try_gpu())\nY, new_state = net(X.as_in_context(d2l.try_gpu()), state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1, ctx=device)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: np.array([outputs[-1]], ctx=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.argmax(axis=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, gluon.Block):\n params = [p.data() for p in net.collect_params().values()]\n else:\n params = net.params\n norm = math.sqrt(sum((p.grad ** 2).sum() for p in params))\n if norm > theta:\n for param in params:\n param.grad[:] *= theta / norm\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], ctx=device)\n else:\n for s in state:\n s.detach()\n y = Y.T.reshape(-1)\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * d2l.size(y), d2l.size(y))\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n animator = 
d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, gluon.Block):\n net.initialize(ctx=device, force_reinit=True, init=init.Normal(0.01))\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n updater = lambda batch_size: trainer.step(batch_size)\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])"},"paddle":{"kind":"null"}}},{"rowIdx":137,"cells":{"id":{"kind":"number","value":138,"string":"138"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.RNN(len(vocab), num_hiddens)\nstate = torch.zeros((1, batch_size, num_hiddens))\nstate.shape\nX = torch.rand(size=(num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\nclass RNNModel(nn.Module):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if not self.rnn.bidirectional:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T.long(), self.vocab_size)\n X = X.to(torch.float32)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, device, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device)\n else:\n return (torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device),\n torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nnet = net.to(device)\nd2l.predict_ch8('time traveller', 10, net, vocab, device)"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn, rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = rnn.RNN(num_hiddens)\nrnn_layer.initialize()\nstate = rnn_layer.begin_state(batch_size=batch_size)\nlen(state), state[0].shape\nX = np.random.uniform(size=(num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, len(state_new), state_new[0].shape\nclass RNNModel(nn.Block):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.dense = nn.Dense(vocab_size)\n def forward(self, inputs, state):\n X = npx.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.dense(Y.reshape(-1, Y.shape[-1]))\n return output, state\n def begin_state(self, *args, **kwargs):\n return self.rnn.begin_state(*args, 
**kwargs)\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, len(vocab))\nnet.initialize(force_reinit=True, ctx=device)\nd2l.predict_ch8('time traveller', 10, net, vocab, device)"},"paddle":{"kind":"null"}}},{"rowIdx":138,"cells":{"id":{"kind":"number","value":139,"string":"139"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_gru_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = torch.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = torch.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = torch.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H,)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nmodel = model.to(device)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_gru_state(batch_size, num_hiddens, device):\n return (np.zeros(shape=(batch_size, num_hiddens), ctx=device), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = npx.sigmoid(np.dot(X, W_xz) + np.dot(H, W_hz) + b_z)\n R = npx.sigmoid(np.dot(X, W_xr) + np.dot(H, W_hr) + b_r)\n H_tilda = np.tanh(np.dot(X, W_xh) + np.dot(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\ngru_layer = rnn.GRU(num_hiddens)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, 
device)"},"paddle":{"kind":"null"}}},{"rowIdx":139,"cells":{"id":{"kind":"number","value":140,"string":"140"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_lstm_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), torch.zeros((batch_size, num_hiddens), device=device))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = torch.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = torch.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = torch.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = torch.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * torch.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H, C)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nmodel = model.to(device)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_lstm_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), np.zeros((batch_size, num_hiddens), ctx=device))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = npx.sigmoid(np.dot(X, W_xi) + np.dot(H, W_hi) + b_i)\n F = npx.sigmoid(np.dot(X, W_xf) + np.dot(H, W_hf) + b_f)\n O = npx.sigmoid(np.dot(X, W_xo) + np.dot(H, W_ho) + b_o)\n C_tilda = np.tanh(np.dot(X, W_xc) + np.dot(H, W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * np.tanh(C)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H, C)\nlstm_layer = 
rnn.LSTM(num_hiddens)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"},"paddle":{"kind":"null"}}},{"rowIdx":140,"cells":{"id":{"kind":"number","value":141,"string":"141"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import os\nimport torch\nfrom d2l import torch as d2l\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['']] for l in lines]\n array = torch.tensor([truncate_pad(l, num_steps, vocab['']) for l in lines])\n valid_len = (array != vocab['']).type(torch.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.type(torch.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.type(torch.int32))\n print('Valid length of Y:', Y_valid_len)\n break"},"mxnet":{"kind":"string","value":"import os\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['']] for l in lines]\n array = np.array([truncate_pad(l, num_steps, vocab['']) for l in lines])\n valid_len = (array != vocab['']).astype(np.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(np.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(np.int32))\n print('Valid length of Y:', Y_valid_len)\n break"},"paddle":{"kind":"null"}}},{"rowIdx":141,"cells":{"id":{"kind":"number","value":142,"string":"142"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"x = torch.arange(12)\nX = x.reshape(3, 4)\ntorch.zeros((2, 3, 4))\ntorch.ones((2, 3, 4))\ntorch.randn(3, 4)\ntorch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = torch.tensor([1.0, 2, 4, 8])\ny = torch.tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\ntorch.exp(x)\nX = torch.arange(12, dtype=torch.float32).reshape((3,4))\nY = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\ntorch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1)\na = torch.arange(3).reshape((3, 1))\nb = torch.arange(2).reshape((1, 2))\nZ = torch.zeros_like(Y)\nZ[:] = X + Y\nA = X.numpy()\nB = torch.tensor(A)\na = torch.tensor([3.5])\nprint(a, a.item(), float(a), int(a))"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"x = paddle.arange(12)\nX = paddle.reshape(x, (3, 4))\npaddle.zeros((2, 3, 4))\npaddle.ones((2, 3, 4))\npaddle.randn((3, 4),'float32')\npaddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = paddle.to_tensor([1.0, 2, 4, 8])\ny = paddle.to_tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x**y\npaddle.exp(x)\nX = paddle.arange(12, dtype='float32').reshape((3, 4))\nY = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\npaddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)\na = paddle.reshape(paddle.arange(3), (3, 1))\nb = paddle.reshape(paddle.arange(2), (1, 2))\nZ = paddle.zeros_like(Y)\nZ = X + Y\nA = X.numpy()\nB = paddle.to_tensor(A)\ntype(A), type(B)\na = paddle.to_tensor([3.5])\na, a.item(), float(a), int(a)"}}},{"rowIdx":142,"cells":{"id":{"kind":"number","value":143,"string":"143"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nX, y = torch.tensor(inputs.values), 
torch.tensor(outputs.values)"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nX, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)"}}},{"rowIdx":143,"cells":{"id":{"kind":"number","value":144,"string":"144"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nx = torch.tensor(3.0)\ny = torch.tensor(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = torch.arange(4)\nA = torch.arange(20).reshape(5, 4)\nA.T\nB = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == B.T\nX = torch.arange(24).reshape(2, 3, 4)\nA = torch.arange(20, dtype=torch.float32).reshape(5, 4)\nB = A.clone()\nprint(A, A + B)\na = 2\nX = torch.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = torch.arange(4, dtype=torch.float32)\nprint(x, x.sum())\na = A.sum()\nA.mean()\nA.sum() / A.numel()\nA.mean(axis=0)\nA.sum(axis=0) / A.shape[0]\nsum_A = A.sum(axis=1, keepdims=True)\ny = torch.ones(4, dtype = torch.float32)\nprint(torch.dot(x, y))\ntorch.sum(x * y)\nA.shape, x.shape, torch.mv(A, x)\nB = torch.ones(4, 3)\ntorch.mm(A, B)\nu = torch.tensor([3.0, -4.0])\ntorch.norm(u)\ntorch.abs(u).sum()\ntorch.norm(torch.ones((4, 9)))"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nx = paddle.to_tensor([3.0])\ny = paddle.to_tensor([2.0])\nx + y, x * y, x / y, x**y\nx = paddle.arange(4)\nA = paddle.reshape(paddle.arange(20), (5, 4))\npaddle.transpose(A, perm=[1, 0])\nB = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == paddle.transpose(B, perm=[1, 0])\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\nA = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))\nB = A.clone()\nA, A + B\na = 2\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\na + X, (a * X).shape\nx = paddle.arange(4, dtype=paddle.float32)\nprint(x, x.sum())\nA.shape, A.sum()\nA.mean(), A.sum() / A.numel()\nA.mean(axis=0), A.sum(axis=0) / A.shape[0]\nsum_A = paddle.sum(A, axis=1, keepdim=True)\ny = paddle.ones(shape=[4], dtype='float32')\nx, y, paddle.dot(x, y)\npaddle.sum(x * y)\nA.shape, x.shape, paddle.mv(A, x)\nB = paddle.ones(shape=[4, 3], dtype='float32')\npaddle.mm(A, B)\nu = paddle.to_tensor([3.0, -4.0])\npaddle.norm(u)\npaddle.abs(u).sum()\npaddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))"}}},{"rowIdx":144,"cells":{"id":{"kind":"number","value":145,"string":"145"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import torch as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import paddle as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1"}}},{"rowIdx":145,"cells":{"id":{"kind":"number","value":146,"string":"146"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nx = 
torch.arange(4.0)\nx.requires_grad_(True)\nx.grad\ny = 2 * torch.dot(x, x)\ny.backward()\nx.grad\nx.grad.zero_()\ny = x.sum()\ny.backward()\nx.grad\nx.grad.zero_()\ny = x * x\ny.sum().backward()\nx.grad\nx.grad.zero_()\ny = x * x\nu = y.detach()\nz = u * x\nz.sum().backward()\nx.grad == u\nx.grad.zero_()\ny.sum().backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while b.norm() < 1000:\n b = b * 2\n if b.sum() > 0:\n c = b\n else:\n c = 100 * b\n return c\na = torch.randn(size=(), requires_grad=True)\nd = f(a)\nd.backward()"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.arange(4, dtype='float32')\nx = paddle.to_tensor(x, stop_gradient=False)\ny = 2 * paddle.dot(x, x)\ny.backward()\nx.grad\nx.clear_gradient()\ny = paddle.sum(x)\ny.backward()\nx.grad\nx.clear_gradient()\ny = x * x\npaddle.sum(y).backward()\nx.grad\nx.clear_gradient()\ny = x * x\nu = y.detach()\nz = u * x\npaddle.sum(z).backward()\nx.grad == u\nx.clear_gradient()\npaddle.sum(y).backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while paddle.norm(b) < 1000:\n b = b * 2\n if paddle.sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)\nd = f(a)\nd.backward()"}}},{"rowIdx":146,"cells":{"id":{"kind":"number","value":147,"string":"147"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport torch\nfrom torch.distributions import multinomial\nfrom d2l import torch as d2l\nfair_probs = torch.ones([6]) / 6\nmultinomial.Multinomial(1, fair_probs).sample()\nmultinomial.Multinomial(10, fair_probs).sample()\ncounts = multinomial.Multinomial(1000, fair_probs).sample()"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport numpy as np\nimport paddle\nfair_probs = [1.0 / 6] * 6\npaddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample()\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000"}}},{"rowIdx":147,"cells":{"id":{"kind":"number","value":148,"string":"148"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"counts = multinomial.Multinomial(10, fair_probs).sample((500,))\ncum_counts = counts.cumsum(dim=0)\nestimates = cum_counts / cum_counts.sum(dim=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].numpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nimport torch\na = dir(torch.distributions)\nhelp(torch.ones)\ntorch.ones(4)"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1))\ncum_counts = counts.cumsum(axis=0)\ncum_counts = cum_counts.squeeze(axis=1)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdim=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i],\n label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of 
experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend()\nimport warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nhelp(paddle.ones)\npaddle.ones([4], dtype='float32')"}}},{"rowIdx":148,"cells":{"id":{"kind":"number","value":149,"string":"149"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport torch\nfrom d2l import torch as d2l\nn = 10000\na = torch.ones(n)\nb = torch.ones(n)\nc = torch.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport time\nimport numpy as np\nimport paddle\nn = 10000\na = paddle.ones([n])\nb = paddle.ones([n])\nc = paddle.zeros([n])\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',\n ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"}}},{"rowIdx":149,"cells":{"id":{"kind":"number","value":150,"string":"150"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport random\nimport torch\nfrom d2l import torch as d2l\ndef synthetic_data(w, b, num_examples):\n X = torch.normal(0, 1, (num_examples, len(w)))\n y = torch.matmul(X, w) + b\n y += torch.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n print(X, '\n', y)\n break\nw = torch.normal(0, 0.01, size=(2,1), requires_grad=True)\nb = torch.zeros(1, requires_grad=True)\ndef linreg(X, w, b):\n return torch.matmul(X, w) + b\ndef sgd(params, lr, batch_size):\n with torch.no_grad():\n for param in params:\n param -= lr * param.grad / batch_size\n param.grad.zero_()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with torch.no_grad():\n train_l = loss(net(features, w, b), labels)"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ndef synthetic_data(w, b, num_examples):\n X = paddle.normal(0, 1, (num_examples, len(w)))\n y = paddle.matmul(X, w) + b\n y += paddle.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 
4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n break\nw = paddle.normal(0, 0.01, shape=(2,1))\nb = paddle.zeros(shape=[1])\nw.stop_gradient = False\nb.stop_gradient = False\ndef linreg(X, w, b):\n return paddle.matmul(X, w) + b\ndef sgd(params, lr, batch_size):\n with paddle.no_grad():\n for i, param in enumerate(params):\n param -= lr * params[i].grad / batch_size\n params[i].set_value(param)\n params[i].clear_gradient()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with paddle.no_grad():\n train_l = loss(net(features, w, b), labels)"}}},{"rowIdx":150,"cells":{"id":{"kind":"number","value":151,"string":"151"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import numpy as np\nimport torch\nfrom torch.utils import data\nfrom d2l import torch as d2l\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = data.TensorDataset(*data_arrays)\n return data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom torch import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nnet[0].weight.data.normal_(0, 0.01)\nnet[0].bias.data.fill_(0)\ntrainer = torch.optim.SGD(net.parameters(), lr=0.03)\nw = net[0].weight.data\nb = net[0].bias.data"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport paddle\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = paddle.io.TensorDataset(data_arrays)\n return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01))\nbias_attr = paddle.ParamAttr(initializer=None)\nnet = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters())\nw = net[0].weight\nb = net[0].bias"}}},{"rowIdx":151,"cells":{"id":{"kind":"number","value":152,"string":"152"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport torch\nimport torchvision\nfrom torch.utils import data\nfrom torchvision import transforms\nfrom d2l import torch as d2l\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=True, transform=trans, download=True)\nmnist_test = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=False, transform=trans, download=True)\ndef 
show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if torch.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(data.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape(18, 28, 28), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ndef get_dataloader_workers():\n return 4\ntrain_iter = data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = torchvision.datasets.FashionMNIST(root=\"../data\", train=True, transform=trans, download=True)\n mnist_test = torchvision.datasets.FashionMNIST(root=\"../data\", train=False, transform=trans, download=True)\n return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport sys\nimport paddle\nfrom paddle.vision import transforms\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\nmnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if paddle.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ndef get_dataloader_workers():\n return 4\ntrain_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\n mnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\n return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()),\n paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers()))"}}},{"rowIdx":152,"cells":{"id":{"kind":"number","value":153,"string":"153"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom IPython import display\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = torch.normal(0, 0.01, size=(num_inputs, 
num_outputs), requires_grad=True)\nb = torch.zeros(num_outputs, requires_grad=True)\nX = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = torch.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = torch.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = torch.tensor([0, 2])\ny_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - torch.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.type(y.dtype) == y\n return float(cmp.type(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, torch.nn.Module):\n net.eval()\n metric = Accumulator(2)\n with torch.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, torch.nn.Module):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, torch.optim.Optimizer):\n updater.zero_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom IPython import display\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))\nb = paddle.zeros(shape=(num_outputs,))\nW.stop_gradient=False\nb.stop_gradient=False\nX = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = paddle.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = paddle.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = paddle.to_tensor([0, 2])\ny_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n if len(y_hat.shape) < len(y.shape):\n cmp = y_hat.astype(y.dtype) == y.squeeze()\n else:\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, paddle.nn.Layer):\n net.eval()\n metric = Accumulator(2)\n with paddle.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, paddle.nn.Layer):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n 
metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]"}}},{"rowIdx":153,"cells":{"id":{"kind":"number","value":154,"string":"154"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\ntrainer = torch.optim.SGD(net.parameters(), lr=0.1)"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.Normal(std=0.01)(m.weight)\nnet.apply(init_weights);\ntrainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())"}}},{"rowIdx":154,"cells":{"id":{"kind":"number","value":155,"string":"155"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.relu(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(torch.ones_like(x), retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\ny = torch.sigmoid(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = torch.tanh(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(-8.0, 8.0, 0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.relu(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = paddle.nn.functional.sigmoid(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = paddle.tanh(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))"}}},{"rowIdx":155,"cells":{"id":{"kind":"number","value":156,"string":"156"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = nn.Parameter(torch.randn(\n num_inputs, num_hiddens, requires_grad=True) * 
0.01)\nb1 = nn.Parameter(torch.zeros(num_hiddens, requires_grad=True))\nW2 = nn.Parameter(torch.randn(\n num_hiddens, num_outputs, requires_grad=True) * 0.01)\nb2 = nn.Parameter(torch.zeros(num_outputs, requires_grad=True))\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = torch.zeros_like(X)\n return torch.max(X, a)\nnum_epochs, lr = 10, 0.1\nupdater = torch.optim.SGD(params, lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = paddle.randn([num_inputs, num_hiddens]) * 0.01\nW1.stop_gradient = False\nb1 = paddle.zeros([num_hiddens])\nb1.stop_gradient = False\nW2 = paddle.randn([num_hiddens, num_outputs]) * 0.01\nW2.stop_gradient = False\nb2 = paddle.zeros([num_outputs])\nb2.stop_gradient = False\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = paddle.zeros_like(X)\n return paddle.maximum(X, a)\nnum_epochs, lr = 10, 0.1\nupdater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)"}}},{"rowIdx":156,"cells":{"id":{"kind":"number","value":157,"string":"157"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Linear(256, 10))\nfor layer in net:\n if type(layer) == nn.Linear:\n # assigning layer.weight_attr after construction has no effect; initialize the existing weight in place\n nn.initializer.Normal(mean=0.0, std=0.01)(layer.weight)\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"}}},{"rowIdx":157,"cells":{"id":{"kind":"number","value":158,"string":"158"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import math\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\ntrue_w, features, poly_features, labels = [torch.tensor(x, dtype=torch.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = nn.MSELoss(reduction='none')\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels.reshape(-1,1)), batch_size)\n 
test_iter = d2l.load_array((test_features, test_labels.reshape(-1,1)), batch_size, is_train=False)\n trainer = torch.optim.SGD(net.parameters(), lr=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500)"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport numpy as np\nimport paddle\nfrom paddle import nn\ntrue_w, features, poly_features, labels = [paddle.to_tensor(x, dtype=\n paddle.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef train(train_features, test_features, train_labels, test_labels,\n num_epochs=400):\n loss = nn.MSELoss()\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False)\n trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2],\n labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :],\n labels[:n_train], labels[n_train:], num_epochs=1500)"}}},{"rowIdx":158,"cells":{"id":{"kind":"number","value":159,"string":"159"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = torch.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = torch.normal(0, 1, size=(num_inputs, 1), requires_grad=True)\n b = torch.zeros(1, requires_grad=True)\n return [w, b]\ndef l2_penalty(w):\n return torch.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, 
train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential(nn.Linear(num_inputs, 1))\n for param in net.parameters():\n param.data.normal_()\n loss = nn.MSELoss(reduction='none')\n num_epochs, lr = 100, 0.003\n trainer = torch.optim.SGD([{\"params\":net[0].weight,'weight_decay': wd}, {\"params\":net[0].bias}], lr=lr)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.mean().backward()\n trainer.step()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1,\n (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = paddle.normal(0, 1, shape=(num_inputs, 1))\n w.stop_gradient = False\n b = paddle.zeros(shape=[1])\n b.stop_gradient = False\n return [w, b]\ndef l2_penalty(w):\n return paddle.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr))\n loss = nn.MSELoss()\n num_epochs, lr = 100, 0.003\n trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n trainer.step()\n trainer.clear_grad()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"}}},{"rowIdx":159,"cells":{"id":{"kind":"number","value":160,"string":"160"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return torch.zeros_like(X)\n if dropout == 0:\n return X\n mask = (torch.rand(X.shape) > dropout).float()\n return mask * X / (1.0 - dropout)\nX= torch.arange(16, dtype = torch.float32).reshape((2, 8))\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Module):\n def __init__(self, num_inputs, 
num_outputs, num_hiddens1, num_hiddens2, is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action='ignore')\nimport random\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return paddle.zeros_like(X)\n if dropout == 0:\n return X\n mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')\n return mask * X / (1.0 - dropout)\nX= paddle.arange(16, dtype = paddle.float32).reshape((2, 8))\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Layer):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,\n is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10, weight_attr=weight_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, 
num_epochs, trainer)"}}},{"rowIdx":160,"cells":{"id":{"kind":"number","value":161,"string":"161"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"trainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.sigmoid(x)\ny.backward(torch.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = torch.normal(0, 1, size=(4,4))\nfor i in range(100):\n M = torch.mm(M,torch.normal(0, 1, size=(4, 4)))"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.sigmoid(x)\ny.backward(paddle.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],\n legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = paddle.normal(0, 1, shape=(4,4))\nfor i in range(100):\n M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4)))"}}},{"rowIdx":161,"cells":{"id":{"kind":"number","value":162,"string":"162"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train = train_data.shape[0]\ntrain_features = torch.tensor(all_features[:n_train].values, dtype=torch.float32)\ntest_features = torch.tensor(all_features[n_train:].values, dtype=torch.float32)\ntrain_labels = torch.tensor(train_data.SalePrice.values.reshape(-1, 1), dtype=torch.float32)\ndef log_rmse(net, features, labels):\n clipped_preds = torch.clamp(net(features), 1, float('inf'))\n rmse = torch.sqrt(loss(torch.log(clipped_preds), torch.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = torch.optim.Adam(net.parameters(), lr = learning_rate, weight_decay = weight_decay)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n optimizer.zero_grad()\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = torch.cat([X_train, X_part], 0)\n y_train = torch.cat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nimport numpy as np\nimport pandas as pd\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom 
paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\nn_train = train_data.shape[0]\ntrain_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32)\ntest_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32)\ntrain_labels = paddle.to_tensor(\n train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32)\ndef log_rmse(net, features, labels):\n clipped_preds = paddle.clip(net(features), 1, float('inf'))\n rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n optimizer.clear_grad()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = paddle.concat([X_train, X_part], 0)\n y_train = paddle.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid"}}},{"rowIdx":162,"cells":{"id":{"kind":"number","value":163,"string":"163"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = torch.rand(2, 20)\nnet(X)\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nclass MySequential(nn.Module):\n def __init__(self, *args):\n super().__init__()\n for idx, module in enumerate(args):\n self._modules[str(idx)] = module\n def forward(self, X):\n for block in self._modules.values():\n X = block(X)\n return X\nclass FixedHiddenMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.rand_weight = torch.rand((20, 20), requires_grad=False)\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(torch.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nclass NestMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = 
paddle.rand([2, 20])\nnet(X)\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nclass MySequential(nn.Layer):\n def __init__(self, *layers):\n super(MySequential, self).__init__()\n if len(layers) > 0 and isinstance(layers[0], tuple):\n for name, layer in layers:\n self.add_sublayer(name, layer)\n else:\n for idx, layer in enumerate(layers):\n self.add_sublayer(str(idx), layer)\n def forward(self, X):\n for layer in self._sub_layers.values():\n X = layer(X)\n return X\nclass FixedHiddenMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.rand_weight = paddle.rand([20, 20])\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nclass NestMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),\n nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)"}}},{"rowIdx":163,"cells":{"id":{"kind":"number","value":164,"string":"164"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = torch.rand(size=(2, 4))\nnet(X)\nnet.state_dict()['2.bias'].data\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_module(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, mean=0, std=0.01)\n nn.init.zeros_(m.bias)\nnet.apply(init_normal)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_constant(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 1)\n nn.init.zeros_(m.bias)\nnet.apply(init_constant)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_xavier(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 42)\nnet[0].apply(init_xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n nn.init.uniform_(m.weight, -10, 10)\n m.weight.data *= m.weight.data.abs() >= 5\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.data[:] += 1\nnet[0].weight.data[0, 0] = 42\nnet[0].weight.data[0]\nlayer = CenteredLayer()\nlayer(torch.FloatTensor([1, 2, 3, 4, 5]))"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = paddle.rand([2, 4])\nnet(X)\nnet.state_dict()['2.bias']\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_sublayer(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Normal(mean=0.0, std=0.01)\n 
paddle.zeros(m.bias)\nnet.apply(init_normal)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef init_constant(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(value = 1)\n paddle.zeros(m.bias)\nnet.apply(init_constant)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef xavier(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.XavierUniform(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(42)\nnet[0].apply(xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n print(\"Init\", *[(name, param.shape) for name, param in m.named_parameters()][0])\n paddle.nn.initializer.XavierUniform(m.weight, -10, 10)\n h = paddle.abs(m.weight) >= 5\n h = paddle.to_tensor(h)\n m = paddle.to_tensor(m.weight)\n m *= h\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.set_value(net[0].weight.numpy() + 1)\nval = net[0].weight.numpy()\nval[0, 0] = 42\nnet[0].weight.set_value(val)\nnet[0].weight[0]\nlayer = CenteredLayer()\nlayer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))"}}},{"rowIdx":164,"cells":{"id":{"kind":"number","value":165,"string":"165"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nimport torch.nn.functional as F\nfrom torch import nn\nclass CenteredLayer(nn.Module):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(torch.rand(4, 8))\nY.mean()\nclass MyLinear(nn.Module):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = nn.Parameter(torch.randn(in_units, units))\n self.bias = nn.Parameter(torch.randn(units,))\n def forward(self, X):\n linear = torch.matmul(X, self.weight.data) + self.bias.data\n return F.relu(linear)\nlinear(torch.rand(2, 5))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(torch.rand(2, 64))"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nclass CenteredLayer(nn.Layer):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(paddle.rand([4, 8]))\nY.mean()\nclass MyLinear(nn.Layer):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32')\n self.bias = paddle.create_parameter(shape=(units,), dtype='float32')\n def forward(self, X):\n linear = paddle.matmul(X, self.weight) + self.bias\n return F.relu(linear)\nlinear(paddle.randn([2, 5]))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(paddle.rand([2, 64]))"}}},{"rowIdx":165,"cells":{"id":{"kind":"number","value":166,"string":"166"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nx = torch.arange(4)\ntorch.save(x, 'x-file')\nx2 = torch.load('x-file')\ny = torch.zeros(4)\ntorch.save([x, y],'x-files')\nx2, y2 = torch.load('x-files')\nmydict = {'x': x, 'y': y}\ntorch.save(mydict, 'mydict')\nmydict2 = torch.load('mydict')\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = torch.randn(size=(2, 20))\nY = net(X)\ntorch.save(net.state_dict(), 'mlp.params')\nclone = 
MLP()\nclone.load_state_dict(torch.load('mlp.params'))\nclone.eval()"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nx = paddle.arange(4)\npaddle.save(x, 'x-file')\nx2 = paddle.load('x-file')\ny = paddle.zeros([4])\npaddle.save([x,y], 'x-file')\nx2, y2 = paddle.load('x-file')\nmydict = {'x': x, 'y': y}\npaddle.save(mydict, 'mydict')\nmydict2 = paddle.load('mydict')\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = paddle.randn(shape=[2, 20])\nY = net(X)\npaddle.save(net.state_dict(), 'mlp.pdparams')\nclone = MLP()\nclone.set_state_dict(paddle.load('mlp.pdparams'))\nclone.eval()"}}},{"rowIdx":166,"cells":{"id":{"kind":"number","value":167,"string":"167"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\ntorch.device('cpu'), torch.device('cuda'), torch.device('cuda:1')\ntorch.cuda.device_count()\ndef try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return torch.device('cpu')\ndef try_all_gpus():\n devices = [torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())]\n return devices if devices else [torch.device('cpu')]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = torch.tensor([1, 2, 3])\nx.device\nX = torch.ones(2, 3, device=try_gpu())\nY = torch.rand(2, 3, device=try_gpu(1))\nnet = nn.Sequential(nn.Linear(3, 1))\nnet = net.to(device=try_gpu())\nnet[0].weight.data.device"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import paddle\nfrom paddle import nn\npaddle.device.set_device(\"cpu\"), paddle.CUDAPlace(0), paddle.CUDAPlace(1)\npaddle.device.cuda.device_count()\ndef try_gpu(i=0):\n if paddle.device.cuda.device_count() >= i + 1:\n return paddle.CUDAPlace(i)\n return paddle.CPUPlace()\ndef try_all_gpus():\n devices = [paddle.CUDAPlace(i) for i in range(paddle.device.cuda.device_count())]\n return devices if devices else paddle.CPUPlace()\ntry_gpu(),try_gpu(10),try_all_gpus()\nx = paddle.to_tensor([1, 2, 3])\nx.place\nX = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu())\nY = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1))\nnet = nn.Sequential(nn.Linear(3, 1))\nnet=net.to(try_gpu())\nnet[0].weight.place"}}},{"rowIdx":167,"cells":{"id":{"kind":"number","value":168,"string":"168"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef corr2d(X, K):\n h, w = K.shape\n Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = torch.tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Module):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = nn.Parameter(torch.rand(kernel_size))\n self.bias = nn.Parameter(torch.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = torch.ones((6, 8))\nX[:, 2:6] = 0\nK = torch.tensor([[1.0, -1.0]])\nconv2d = nn.Conv2d(1,1, kernel_size=(1, 2), bias=False)\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor 
i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.zero_grad()\n l.sum().backward()\n conv2d.weight.data[:] -= lr * conv2d.weight.grad\nconv2d.weight.data.reshape((1, 2))"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef corr2d(X, K):\n h, w = K.shape\n Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Layer):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = paddle.ParamAttr(paddle.rand(kernel_size))\n self.bias = paddle.ParamAttr(paddle.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = paddle.ones((6, 8))\nX[:, 2:6] = 0\nK = paddle.to_tensor([[1.0, -1.0]])\nconv2d = nn.Conv2D(1, 1, kernel_size=(1, 2))\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.clear_gradients()\n l.sum().backward()\n with paddle.no_grad():\n conv2d.weight[:] -= lr * conv2d.weight.grad\nconv2d.weight.reshape((1, 2))"}}},{"rowIdx":168,"cells":{"id":{"kind":"number","value":169,"string":"169"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\n\ndef comp_conv2d(conv2d, X):\n X = X.reshape((1, 1) + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1)\nX = torch.rand(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\n\ndef comp_conv2d(conv2d, X):\n X = paddle.reshape(X, [1, 1] + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1)\nX = paddle.rand((8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape"}}},{"rowIdx":169,"cells":{"id":{"kind":"number","value":170,"string":"170"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom d2l import torch as d2l\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = torch.tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return torch.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = torch.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = 
X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = torch.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = torch.normal(0, 1, (3, 3, 3))\nK = torch.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(torch.abs(Y1 - Y2).sum()) < 1e-6"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return paddle.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = paddle.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = paddle.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = paddle.normal(0, 1, (3, 3, 3))\nK = paddle.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(paddle.abs(Y1 - Y2).sum()) < 1e-6"}}},{"rowIdx":170,"cells":{"id":{"kind":"number","value":171,"string":"171"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2d(3)\npool2d(X)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2d((2, 3), stride=(2, 3), padding=(0, 1))\npool2d(X)\nX = torch.cat((X, X + 1), 1)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = paddle.arange(16, dtype=\"float32\").reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3, stride=3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3))\npool2d(X)\nX = paddle.concat((X, X + 1), 1)\npool2d = paddle.nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)"}}},{"rowIdx":171,"cells":{"id":{"kind":"number","value":172,"string":"172"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n 
nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape: \t',X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2d:\n nn.init.xavier_uniform_(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = torch.optim.SGD(net.parameters(), lr=lr)\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.zero_grad()\n X, y = X.to(device), y.to(device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with torch.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn, optimizer\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = paddle.rand((1, 1, 28, 28), 'float32')\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2D:\n nn.initializer.XavierUniform(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.clear_grad()\n X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with paddle.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))"}}},{"rowIdx":172,"cells":{"id":{"kind":"number","value":173,"string":"173"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import 
torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = torch.randn(1, 1, 224, 224)\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nnet = nn.Sequential(\n nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)"}}},{"rowIdx":173,"cells":{"id":{"kind":"number","value":174,"string":"174"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2d(kernel_size=2,stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(\n *conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = torch.randn(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2D(kernel_size=2, stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(*conv_blks, nn.Flatten(),\n nn.Linear(out_channels 
* 7 * 7, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)"}}},{"rowIdx":174,"cells":{"id":{"kind":"number","value":175,"string":"175"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(),\n nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2d(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2d(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2d(3, stride=2),\n nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten())\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(3, stride=2), nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"}}},{"rowIdx":175,"cells":{"id":{"kind":"number","value":176,"string":"176"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass Inception(nn.Module):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return torch.cat((p1, p2, p3, p4), dim=1)\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, 
padding=1))\nb2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2d(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = torch.rand(size=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nclass Inception(nn.Layer):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2D(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2D(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return paddle.concat(x=[p1, p2, p3, p4], axis=1)\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2,padding=1))\nb2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = paddle.rand(shape=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"}}},{"rowIdx":176,"cells":{"id":{"kind":"number","value":177,"string":"177"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not torch.is_grad_enabled():\n X_hat = (X - moving_mean) / 
torch.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(dim=0)\n var = ((X - mean) ** 2).mean(dim=0)\n else:\n mean = X.mean(dim=(0, 2, 3), keepdim=True)\n var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / torch.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean.data, moving_var.data\nclass BatchNorm(nn.Module):\n def __init__(self, num_features, num_dims):\n super().__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = nn.Parameter(torch.ones(shape))\n self.beta = nn.Parameter(torch.zeros(shape))\n self.moving_mean = torch.zeros(shape)\n self.moving_var = torch.ones(shape)\n def forward(self, X):\n if self.moving_mean.device != X.device:\n self.moving_mean = self.moving_mean.to(X.device)\n self.moving_var = self.moving_var.to(X.device)\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9)\n return Y\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(16*4*4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nnet[1].gamma.reshape((-1,)), net[1].beta.reshape((-1,))\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(),\n nn.Linear(84, 10))"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True):\n if not is_training:\n X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = paddle.mean(X)\n var = paddle.mean(((X - mean) ** 2))\n else:\n mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)\n var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / (var + eps) ** 0.5\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Layer):\n def __init__(self, num_features, num_dims=4):\n super(BatchNorm, self).__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32')))\n self.beta = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32')))\n self.moving_mean = paddle.zeros(shape=shape, dtype='float32')\n 
self.moving_var = paddle.ones(shape=shape, dtype='float32')\n def forward(self, X):\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training)\n return Y\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nparam = net.parameters()\nprint('gamma:', param[2].numpy().reshape(-1))\nprint('beta:', param[3].numpy().reshape(-1))\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(),\n nn.Linear(84, 10))"}}},{"rowIdx":177,"cells":{"id":{"kind":"number","value":178,"string":"178"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass Residual(nn.Module):\n def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2d(num_channels)\n self.bn2 = nn.BatchNorm2d(num_channels)\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3,3)\nX = torch.rand(4, 3, 6, 6)\nY = blk(X)\nY.shape\nblk = Residual(3,6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nfrom paddle.nn import functional as F\nclass Residual(nn.Layer):\n def __init__(self, input_channels, num_channels, use_1x1conv=False,\n strides=1):\n super(Residual, self).__init__()\n self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2D(num_channels)\n self.bn2 = nn.BatchNorm2D(num_channels)\n self.relu = nn.ReLU()\n def forward(self, X):\n Y = 
F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3, 3)\nX = paddle.rand([4, 3, 6, 6])\nY = blk(X)\nY.shape\nblk = Residual(3, 6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"}}},{"rowIdx":178,"cells":{"id":{"kind":"number","value":179,"string":"179"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Module):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = torch.cat((X, Y), dim=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = torch.randn(4, 3, 8, 8)\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2d(kernel_size=2, stride=2))\nb1 = nn.Sequential(\n nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2d(num_channels), nn.ReLU(),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Layer):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = paddle.concat(x=[X, Y], axis=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = paddle.randn([4, 3, 8, 8])\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2D(kernel_size=2, stride=2))\nb1 = nn.Sequential(\n nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2D(num_channels), nn.ReLU(),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 
10))"}}},{"rowIdx":179,"cells":{"id":{"kind":"number","value":180,"string":"180"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nT = 1000\ntime = torch.arange(1, T + 1, dtype=torch.float32)\nx = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = torch.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = torch.optim.Adam(net.parameters(), lr)\n for epoch in range(epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nmultistep_preds = torch.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = torch.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nT = 1000\ntime = paddle.arange(1, T + 1, dtype=paddle.float32)\nx = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = paddle.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.XavierUniform(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters())\n for epoch in range(epochs):\n for i,(X, y) in enumerate (train_iter()):\n trainer.clear_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nmultistep_preds = paddle.zeros([T])\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in 
range(n_train + tau, T):
    multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))
d2l.plot([time, time[tau:], time[n_train + tau:]],
         [x.detach().numpy(), onestep_preds.detach().numpy(),
          multistep_preds[n_train + tau:].detach().numpy()], 'time',
         'x', legend=['data', '1-step preds', 'multistep preds'],
         xlim=[1, 1000], figsize=(6, 3))
max_steps = 64
features = paddle.zeros((T - tau - max_steps + 1, tau + max_steps))
for i in range(tau):
    features[:, i] = x[i: i + T - tau - max_steps + 1]
for i in range(tau, tau + max_steps):
    features[:, i] = net(features[:, i - tau:i]).reshape([-1])
steps = (1, 4, 16, 64)
d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],
         [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',
         legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],
         figsize=(6, 3))

id: 181 (tensorflow: null, mxnet: null)
pytorch:
import collections
import re
from d2l import torch as d2l

paddle:
import collections
import re
from d2l import paddle as d2l

id: 182 (tensorflow: null, mxnet: null)
pytorch:
import random
import torch
from d2l import torch as d2l
tokens = d2l.tokenize(d2l.read_time_machine())
corpus = [token for line in tokens for token in line]
vocab = d2l.Vocab(corpus)
vocab.token_freqs[:10]
def seq_data_iter_random(corpus, batch_size, num_steps):
    corpus = corpus[random.randint(0, num_steps - 1):]
    num_subseqs = (len(corpus) - 1) // num_steps
    initial_indices = list(range(0, num_subseqs * num_steps, num_steps))
    random.shuffle(initial_indices)
    def data(pos):
        return corpus[pos: pos + num_steps]
    num_batches = num_subseqs // batch_size
    for i in range(0, batch_size * num_batches, batch_size):
        initial_indices_per_batch = initial_indices[i: i + batch_size]
        X = [data(j) for j in initial_indices_per_batch]
        Y = [data(j + 1) for j in initial_indices_per_batch]
        yield torch.tensor(X), torch.tensor(Y)
def seq_data_iter_sequential(corpus, batch_size, num_steps):
    offset = random.randint(0, num_steps)
    num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size
    Xs = torch.tensor(corpus[offset: offset + num_tokens])
    Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens])
    Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)
    num_batches = Xs.shape[1] // num_steps
    for i in range(0, num_steps * num_batches, num_steps):
        X = Xs[:, i: i + num_steps]
        Y = Ys[:, i: i + num_steps]
        yield X, Y

paddle:
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import random
import paddle
tokens = d2l.tokenize(d2l.read_time_machine())
corpus = [token for line in tokens for token in line]
vocab = d2l.Vocab(corpus)
vocab.token_freqs[:10]
def seq_data_iter_random(corpus, batch_size, num_steps):
    corpus = corpus[random.randint(0, num_steps - 1):]
    num_subseqs = (len(corpus) - 1) // num_steps
    initial_indices = list(range(0, num_subseqs * num_steps, num_steps))
    random.shuffle(initial_indices)
    def data(pos):
        return corpus[pos: pos + num_steps]
    num_batches = num_subseqs // batch_size
    for i in range(0, batch_size * num_batches, batch_size):
        initial_indices_per_batch = initial_indices[i: i + batch_size]
        X = [data(j) for j in initial_indices_per_batch]
        Y = [data(j + 1) for j in initial_indices_per_batch]
        yield paddle.to_tensor(X), paddle.to_tensor(Y)
def seq_data_iter_sequential(corpus, batch_size, num_steps):
    offset = random.randint(0, num_steps)
    num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size
    Xs = paddle.to_tensor(corpus[offset: offset + num_tokens])
    Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens])
    Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))
    num_batches = Xs.shape[1] // num_steps
    for i in range(0, num_steps * num_batches, num_steps):
        X = Xs[:, i: i + num_steps]
        Y = Ys[:, i: i + num_steps]
        yield X, Y

id: 183 (tensorflow: null, mxnet: null)
pytorch:
import torch
from d2l import torch as d2l
X, W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4))
H, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4))
torch.matmul(X, W_xh) + torch.matmul(H, W_hh)
torch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))

paddle:
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
X, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4))
H, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4))
paddle.matmul(X, W_xh) + paddle.matmul(H, W_hh)
paddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))

id: 184 (tensorflow: null, mxnet: null)
pytorch:
%matplotlib inline
import math
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
F.one_hot(torch.tensor([0, 2]), len(vocab))
X = torch.arange(10).reshape((2, 5))
F.one_hot(X.T, 28).shape
def get_params(vocab_size, num_hiddens, device):
    num_inputs = num_outputs = vocab_size
    def normal(shape):
        return torch.randn(size=shape, device=device) * 0.01
    W_xh = normal((num_inputs, num_hiddens))
    W_hh = normal((num_hiddens, num_hiddens))
    b_h = torch.zeros(num_hiddens, device=device)
    W_hq = normal((num_hiddens, num_outputs))
    b_q = torch.zeros(num_outputs, device=device)
    params = [W_xh, W_hh, b_h, W_hq, b_q]
    for param in params:
        param.requires_grad_(True)
    return params
def init_rnn_state(batch_size, num_hiddens, device):
    return (torch.zeros((batch_size, num_hiddens), device=device), )
def rnn(inputs, state, params):
    W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state
    outputs = []
    for X in inputs:
        H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h)
        Y = torch.mm(H, W_hq) + b_q
        outputs.append(Y)
    return torch.cat(outputs, dim=0), (H,)
class RNNModelScratch:
    def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):
        self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
        self.params = get_params(vocab_size, num_hiddens, device)
        self.init_state, self.forward_fn = init_state, forward_fn
    def __call__(self, X, state):
        X = F.one_hot(X.T, self.vocab_size).type(torch.float32)
        return self.forward_fn(X, state, self.params)
    def begin_state(self, batch_size, device):
        return self.init_state(batch_size, self.num_hiddens, device)
num_hiddens = 512
net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)
state = net.begin_state(X.shape[0], d2l.try_gpu())
Y, new_state = net(X.to(d2l.try_gpu()), state)
Y.shape, len(new_state), new_state[0].shape
def predict_ch8(prefix, num_preds, net, vocab, device):
    state = net.begin_state(batch_size=1, device=device)
    outputs = [vocab[prefix[0]]]
    get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1))
    for y in prefix[1:]:
        _, state = net(get_input(), state)
        outputs.append(vocab[y])
    for _ in range(num_preds):
        y, state = net(get_input(), state)
        outputs.append(int(y.argmax(dim=1).reshape(1)))
    return ''.join([vocab.idx_to_token[i] for i in outputs])
def grad_clipping(net, theta):
    if isinstance(net, nn.Module):
        params = [p for p in net.parameters() if p.requires_grad]
    else:
        params = net.params
    norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))
    if norm > theta:
        for param in params:
            param.grad[:] *= theta / norm
def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):
    state, timer = None, d2l.Timer()
    metric = d2l.Accumulator(2)
    for X, Y in train_iter:
        if state is None or use_random_iter:
            state = net.begin_state(batch_size=X.shape[0], device=device)
        else:
            if isinstance(net, nn.Module) and not isinstance(state, tuple):
                state.detach_()
            else:
                for s in state:
                    s.detach_()
        y = Y.T.reshape(-1)
        X, y = X.to(device), y.to(device)
        y_hat, state = net(X, state)
        l = loss(y_hat, y.long()).mean()
        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.backward()
            grad_clipping(net, 1)
            updater.step()
        else:
            l.backward()
            grad_clipping(net, 1)
            updater(batch_size=1)
        metric.add(l * y.numel(), y.numel())
    return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()
def train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):
    loss = nn.CrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])
    if isinstance(net, nn.Module):
        updater = torch.optim.SGD(net.parameters(), lr)
    else:
        updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
    predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)
    for epoch in range(num_epochs):
        ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)
        if (epoch + 1) % 10 == 0:
            animator.add(epoch + 1, [ppl])
net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)
train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)

paddle:
%matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import math
import paddle
from paddle import nn
from paddle.nn import functional as F
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
F.one_hot(paddle.to_tensor([0, 2]), len(vocab))
X = paddle.arange(10).reshape((2, 5))
F.one_hot(X.T, 28).shape
def get_params(vocab_size, num_hiddens):
    num_inputs = num_outputs = vocab_size
    def normal(shape):
        return paddle.randn(shape=shape) * 0.01
    W_xh = normal([num_inputs, num_hiddens])
    W_hh = normal([num_hiddens, num_hiddens])
    b_h = paddle.zeros(shape=[num_hiddens])
    W_hq = normal([num_hiddens, num_outputs])
    b_q = paddle.zeros(shape=[num_outputs])
    params = [W_xh, W_hh, b_h, W_hq, b_q]
    for param in params:
        param.stop_gradient = False
    return params
def init_rnn_state(batch_size, num_hiddens):
    return (paddle.zeros(shape=[batch_size, num_hiddens]), )
def rnn(inputs, state, params):
    W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state
    outputs = []
    for X in inputs:
        H = paddle.tanh(paddle.mm(X, W_xh) + paddle.mm(H, W_hh) + b_h)
        Y = paddle.mm(H, W_hq) + b_q
        outputs.append(Y)
    return paddle.concat(x=outputs, axis=0), (H,)
class RNNModelScratch:
    def __init__(self, vocab_size, num_hiddens, get_params, init_state, forward_fn):
        self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
        self.params = get_params(vocab_size, num_hiddens)
        self.init_state, self.forward_fn = init_state, forward_fn
    def __call__(self, X, state):
        X = F.one_hot(X.T, self.vocab_size)
        return self.forward_fn(X, state, self.params)
    def begin_state(self, batch_size):
        return self.init_state(batch_size, self.num_hiddens)
num_hiddens = 512
net = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)
state = net.begin_state(X.shape[0])
Y, new_state = net(X, state)
Y.shape, len(new_state), new_state[0].shape
def predict_ch8(prefix, num_preds, net, vocab, device):
    state = net.begin_state(batch_size=1)
    outputs = [vocab[prefix[0]]]
    get_input = lambda: paddle.to_tensor(outputs[-1], place=device).reshape((1, 1))
    for y in prefix[1:]:
        _, state = net(get_input(), state)
        outputs.append(vocab[y])
    for _ in range(num_preds):
        y, state = net(get_input(), state)
        outputs.append(int(paddle.reshape(paddle.argmax(y, axis=1), shape=[1])))
    return ''.join([vocab.idx_to_token[i] for i in outputs])
def grad_clipping(net, theta):
    if isinstance(net, nn.Layer):
        params = [p for p in net.parameters() if not p.stop_gradient]
    else:
        params = net.params
    norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params))
    if norm > theta:
        with paddle.no_grad():
            for param in params:
                param.grad.set_value(param.grad * theta / norm)
def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):
    state, timer = None, d2l.Timer()
    metric = d2l.Accumulator(2)
    for X, Y in train_iter:
        if state is None or use_random_iter:
            state = net.begin_state(batch_size=X.shape[0])
        else:
            if isinstance(net, nn.Layer) and not isinstance(state, tuple):
                state.stop_gradient = True
            else:
                for s in state:
                    s.stop_gradient = True
        y = paddle.reshape(Y.T, shape=[-1])
        X = paddle.to_tensor(X, place=device)
        y = paddle.to_tensor(y, place=device)
        y_hat, state = net(X, state)
        l = loss(y_hat, y).mean()
        if isinstance(updater, paddle.optimizer.Optimizer):
            updater.clear_grad()
            l.backward()
            grad_clipping(net, 1)
            updater.step()
        else:
            l.backward()
            grad_clipping(net, 1)
            updater(batch_size=1)
        metric.add(l * y.numel(), y.numel())
    return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()
def train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):
    loss = nn.CrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])
    if isinstance(net, nn.Layer):
        updater = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())
    else:
        updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
    predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)
    for epoch in range(num_epochs):
        ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)
        if (epoch + 1) % 10 == 0:
            animator.add(epoch + 1, [ppl])
net = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)
train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)

id: 185 (tensorflow: null, mxnet: null)
pytorch:
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
num_hiddens = 256
rnn_layer = nn.RNN(len(vocab), num_hiddens)
state = torch.zeros((1, batch_size, num_hiddens))
state.shape
X = torch.rand(size=(num_steps, batch_size, len(vocab)))
Y, state_new = rnn_layer(X, state)
Y.shape, state_new.shape
class RNNModel(nn.Module):
    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.num_hiddens = self.rnn.hidden_size
        if not self.rnn.bidirectional:
            self.num_directions = 1
            self.linear = nn.Linear(self.num_hiddens, self.vocab_size)
        else:
            self.num_directions = 2
            self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)
    def forward(self, inputs, state):
        X = F.one_hot(inputs.T.long(), self.vocab_size)
        X = X.to(torch.float32)
        Y, state = self.rnn(X, state)
        output = self.linear(Y.reshape((-1, Y.shape[-1])))
        return output, state
    def begin_state(self, device, batch_size=1):
        if not isinstance(self.rnn, nn.LSTM):
            return torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device)
        else:
            return (torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device),
                    torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device))
device = d2l.try_gpu()
net = RNNModel(rnn_layer, vocab_size=len(vocab))
net = net.to(device)
d2l.predict_ch8('time traveller', 10, net, vocab, device)
num_epochs, lr = 500, 1
d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)

paddle:
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn
from paddle.nn import functional as F
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
num_hiddens = 256
rnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True)
state = paddle.zeros(shape=[1, batch_size, num_hiddens])
state.shape
X = paddle.rand(shape=[num_steps, batch_size, len(vocab)])
Y, state_new = rnn_layer(X, state)
Y.shape, state_new.shape
class RNNModel(nn.Layer):
    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.num_hiddens = self.rnn.hidden_size
        if self.rnn.num_directions == 1:
            self.num_directions = 1
            self.linear = nn.Linear(self.num_hiddens, self.vocab_size)
        else:
            self.num_directions = 2
            self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)
    def forward(self, inputs, state):
        X = F.one_hot(inputs.T, self.vocab_size)
        Y, state = self.rnn(X, state)
        output = self.linear(Y.reshape((-1, Y.shape[-1])))
        return output, state
    def begin_state(self, batch_size=1):
        if not isinstance(self.rnn, nn.LSTM):
            return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens])
        else:
            return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]),
                    paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]))
device = d2l.try_gpu()
net = RNNModel(rnn_layer, vocab_size=len(vocab))
d2l.predict_ch8('time traveller', 10, net, vocab, device)
num_epochs, lr = 500, 1.0
d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)

id: 186 (tensorflow: null, mxnet: null)
pytorch:
import torch
from torch import nn
from d2l import torch as d2l
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
def get_params(vocab_size, num_hiddens, device):
    num_inputs = num_outputs = vocab_size
    def normal(shape):
        return torch.randn(size=shape, device=device) * 0.01
    def three():
        return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))
    W_xz, W_hz, b_z = three()
    W_xr, W_hr, b_r = three()
    W_xh, W_hh, b_h = three()
    W_hq = normal((num_hiddens, num_outputs))
    b_q = torch.zeros(num_outputs, device=device)
    params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]
    for param in params:
        param.requires_grad_(True)
    return params
def init_gru_state(batch_size, num_hiddens, device):
    return (torch.zeros((batch_size, num_hiddens), device=device), )
def gru(inputs, state, params):
    W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state
    outputs = []
    for X in inputs:
        Z = torch.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)
        R = torch.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)
        H_tilda = torch.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)
        H = Z * H + (1 - Z) * H_tilda
        Y = H @ W_hq + b_q
        outputs.append(Y)
    return torch.cat(outputs, dim=0), (H,)
vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()
num_epochs, lr = 500, 1
model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params, init_gru_state, gru)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
num_inputs = vocab_size
gru_layer = nn.GRU(num_inputs, num_hiddens)
model = d2l.RNNModel(gru_layer, len(vocab))
model = model.to(device)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)

paddle:
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn.functional as F
from paddle import nn
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
def get_params(vocab_size, num_hiddens):
    num_inputs = num_outputs = vocab_size
    def normal(shape):
        return paddle.randn(shape=shape) * 0.01
    def three():
        return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))
    W_xz, W_hz, b_z = three()
    W_xr, W_hr, b_r = three()
    W_xh, W_hh, b_h = three()
    W_hq = normal((num_hiddens, num_outputs))
    b_q = paddle.zeros([num_outputs])
    params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]
    for param in params:
        param.stop_gradient = False
    return params
def init_gru_state(batch_size, num_hiddens):
    return (paddle.zeros([batch_size, num_hiddens]), )
def gru(inputs, state, params):
    W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params
    H, *_ = state
    outputs = []
    for X in inputs:
        Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)
        R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)
        H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)
        H = Z * H + (1 - Z) * H_tilda
        Y = H @ W_hq + b_q
        outputs.append(Y)
    return paddle.concat(outputs, axis=0), (H, *_)
vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()
num_epochs, lr = 500, 1.0
model = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params, init_gru_state, gru)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
num_inputs = vocab_size
gru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True)
model = d2l.RNNModel(gru_layer, len(vocab))
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)

id: 187 (tensorflow: null, mxnet: null)
pytorch:
import torch
from torch import nn
from d2l import torch as d2l
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
def get_lstm_params(vocab_size, num_hiddens, device):
    num_inputs = num_outputs = vocab_size
    def normal(shape):
        return torch.randn(size=shape, device=device) * 0.01
    def three():
        return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))
    W_xi, W_hi, b_i = three()
    W_xf, W_hf, b_f = three()
    W_xo, W_ho, b_o = three()
    W_xc, W_hc, b_c = three()
    W_hq = normal((num_hiddens, num_outputs))
    b_q = torch.zeros(num_outputs, device=device)
    params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]
    for param in params:
        param.requires_grad_(True)
    return params
def init_lstm_state(batch_size, num_hiddens, device):
    return (torch.zeros((batch_size, num_hiddens), device=device), torch.zeros((batch_size, num_hiddens), device=device))
def lstm(inputs, state, params):
    [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,
     W_hq, b_q] = params
    (H, C) = state
    outputs = []
    for X in inputs:
        I = torch.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)
        F = torch.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)
        O = torch.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)
        C_tilda = torch.tanh((X @ W_xc) + (H @ W_hc) + b_c)
        C = F * C + I * C_tilda
        H = O * torch.tanh(C)
        Y = (H @ W_hq) + b_q
        outputs.append(Y)
    return torch.cat(outputs, dim=0), (H, C)
vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()
num_epochs, lr = 500, 1
model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params, init_lstm_state, lstm)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
num_inputs = vocab_size
lstm_layer = nn.LSTM(num_inputs, num_hiddens)
model = d2l.RNNModel(lstm_layer, len(vocab))
model = model.to(device)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)

paddle:
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn.functional as Function
from paddle import nn
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
def get_lstm_params(vocab_size, num_hiddens):
    num_inputs = num_outputs = vocab_size
    def normal(shape):
        return paddle.randn(shape=shape) * 0.01
    def three():
        return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))
    W_xi, W_hi, b_i = three()
    W_xf, W_hf, b_f = three()
    W_xo, W_ho, b_o = three()
    W_xc, W_hc, b_c = three()
    W_hq = normal((num_hiddens, num_outputs))
    b_q = paddle.zeros([num_outputs])
    params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]
    for param in params:
        param.stop_gradient = False
    return params
def init_lstm_state(batch_size, num_hiddens):
    return (paddle.zeros([batch_size, num_hiddens]), paddle.zeros([batch_size, num_hiddens]))
def lstm(inputs, state, params):
    [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,
     W_hq, b_q] = params
    (H, C) = state
    outputs = []
    for X in inputs:
        I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)
        F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)
        O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)
        C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c)
        C = F * C + I * C_tilda
        H = O * paddle.tanh(C)
        Y = (H @ W_hq) + b_q
        outputs.append(Y)
    return paddle.concat(outputs, axis=0), (H, C)
vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()
num_epochs, lr = 500, 1.0
model = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params, init_lstm_state, lstm)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
num_inputs = vocab_size
lstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True)
model = d2l.RNNModel(lstm_layer, len(vocab))
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)

id: 188 (tensorflow: null, mxnet: null)
pytorch:
import os
import torch
from d2l import torch as d2l
def build_array_nmt(lines, vocab, num_steps):
    lines = [vocab[l] for l in lines]
    lines = [l + [vocab['<eos>']] for l in lines]
    array = torch.tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])
    valid_len = (array != vocab['<pad>']).type(torch.int32).sum(1)
    return array, valid_len
train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)
for X, X_valid_len, Y, Y_valid_len in train_iter:
    print('X:', X.type(torch.int32))
    print('Valid length of X:', X_valid_len)
    print('Y:', Y.type(torch.int32))
    print('Valid length of Y:', Y_valid_len)
    break

paddle:
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import os
import paddle
def build_array_nmt(lines, vocab, num_steps):
    lines = [vocab[l] for l in lines]
    lines = [l + [vocab['<eos>']] for l in lines]
    array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])
    valid_len = (array != vocab['<pad>']).astype(paddle.int32).sum(1)
    return array, valid_len
train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)
for X, X_valid_len, Y, Y_valid_len in train_iter:
    print('X:', X.astype(paddle.int32))
    print('Valid length of X:', X_valid_len)
    print('Y:', Y.astype(paddle.int32))
    print('Valid length of Y:', Y_valid_len)
    break

id: 189 (mxnet: null, paddle: null)
tensorflow:
x = tf.range(12)
tf.size(x)
X = tf.reshape(x, (3, 4))
tf.zeros((2, 3, 4))
tf.ones((2, 3, 4))
tf.random.normal(shape=[3, 4])
tf.constant([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
x = tf.constant([1.0, 2, 4, 8])
y = tf.constant([2.0, 2, 2, 2])
x + y, x - y, x * y, x / y, x ** y
tf.exp(x)
X = tf.reshape(tf.range(12, dtype=tf.float32), (3, 4))
Y = tf.constant([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
tf.concat([X, Y], axis=0), tf.concat([X, Y], axis=1)
tf.reduce_sum(X)
a = tf.reshape(tf.range(3), (3, 1))
b = tf.reshape(tf.range(2), (1, 2))
X_var = tf.Variable(X)
X_var[1, 2].assign(9)
X_var = tf.Variable(X)
X_var[0:2, :].assign(tf.ones(X_var[0:2, :].shape, dtype=tf.float32) * 12)
Z = tf.Variable(tf.zeros_like(Y))
Z.assign(X + Y)
@tf.function
def computation(X, Y):
    Z = tf.zeros_like(Y)
    A = X + Y
    B = A + Y
    C = B + Y
    return C + Y
computation(X, Y)
A = X.numpy()
B = tf.constant(A)
a = tf.constant([3.5]).numpy()
print(a, a.item(), float(a), int(a))

pytorch:
x = torch.arange(12)
x.numel()
X = x.reshape(3, 4)
torch.zeros((2, 3, 4))
torch.ones((2, 3, 4))
torch.randn(3, 4)
torch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
x = torch.tensor([1.0, 2, 4, 8])
y = torch.tensor([2, 2, 2, 2])
x + y, x - y, x * y, x / y, x ** y
torch.exp(x)
X = torch.arange(12, dtype=torch.float32).reshape((3, 4))
Y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
torch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1)
X.sum()
a = torch.arange(3).reshape((3, 1))
b = torch.arange(2).reshape((1, 2))
X[1, 2] = 9
X[0:2, :] = 12
Z = torch.zeros_like(Y)
Z[:] = X + Y
before = id(X)
X += Y
id(X) == before
A = X.numpy()
B = torch.tensor(A)
a = torch.tensor([3.5])
print(a, a.item(), float(a), int(a))

id: 190 (mxnet: null, paddle: null)
tensorflow:
import tensorflow as tf
X, y = tf.constant(inputs.values), tf.constant(outputs.values)

pytorch:
import torch
X, y = torch.tensor(inputs.values), torch.tensor(outputs.values)

id: 191 (mxnet: null, paddle: null)
tensorflow:
import tensorflow as tf
x = tf.constant(3.0)
y = tf.constant(2.0)
print(x + y, x * y, x / y, x**y)
x = tf.range(4)
A = tf.reshape(tf.range(20), (5, 4))
tf.transpose(A)
B = tf.constant([[1, 2, 3], [2, 0, 4], [3, 4, 5]])
B == tf.transpose(B)
X = tf.reshape(tf.range(24), (2, 3, 4))
A = tf.reshape(tf.range(20, dtype=tf.float32), (5, 4))
B = A
print(A, A + B)
a = 2
X = tf.reshape(tf.range(24), (2, 3, 4))
print(a + X, (a * X).shape)
x = tf.range(4, dtype=tf.float32)
print(x, tf.reduce_sum(x))
a = tf.reduce_sum(A)
A_sum_axis0 = tf.reduce_sum(A, axis=0)
A_sum_axis1 = tf.reduce_sum(A, axis=1)
tf.reduce_sum(A, axis=[0, 1])
tf.reduce_mean(A)
tf.reduce_sum(A) / tf.size(A).numpy()
tf.reduce_mean(A, axis=0)
tf.reduce_sum(A, axis=0) / A.shape[0]
sum_A = tf.reduce_sum(A, axis=1, keepdims=True)
tf.cumsum(A, axis=0)
y = tf.ones(4, dtype=tf.float32)
print(tf.tensordot(x, y, axes=1))
tf.reduce_sum(x * y)
A.shape, x.shape, tf.linalg.matvec(A, x)
B = tf.ones((4, 3), tf.float32)
tf.matmul(A, B)
u = tf.constant([3.0, -4.0])
tf.norm(u)
tf.reduce_sum(tf.abs(u))
tf.norm(tf.ones((4, 9)))

pytorch:
import torch
x = torch.tensor(3.0)
y = torch.tensor(2.0)
print(x + y, x * y, x / y, x**y)
x = torch.arange(4)
A = torch.arange(20).reshape(5, 4)
A.T
B = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])
B == B.T
X = torch.arange(24).reshape(2, 3, 4)
A = torch.arange(20, dtype=torch.float32).reshape(5, 4)
B = A.clone()
print(A, A + B)
a = 2
X = torch.arange(24).reshape(2, 3, 4)
print(a + X, (a * X).shape)
x = torch.arange(4, dtype=torch.float32)
print(x, x.sum())
a = A.sum()
A_sum_axis0 = A.sum(axis=0)
A_sum_axis1 = A.sum(axis=1)
A.sum(axis=[0, 1])
A.mean()
A.sum() / A.numel()
A.mean(axis=0)
A.sum(axis=0) / A.shape[0]
sum_A = A.sum(axis=1, keepdims=True)
A.cumsum(axis=0)
y = torch.ones(4, dtype=torch.float32)
print(torch.dot(x, y))
torch.sum(x * y)
A.shape, x.shape, torch.mv(A, x)
B = torch.ones(4, 3)
torch.mm(A, B)
u = torch.tensor([3.0, -4.0])
torch.norm(u)
torch.abs(u).sum()
torch.norm(torch.ones((4, 9)))

id: 192 (mxnet: null, paddle: null)
tensorflow:
%matplotlib inline
import numpy as np
from matplotlib_inline import backend_inline
from d2l import tensorflow as d2l
def f(x):
    return 3 * x ** 2 - 4 * x

pytorch:
%matplotlib inline
import numpy as np
from matplotlib_inline import backend_inline
from d2l import torch as d2l
def f(x):
    return 3 * x ** 2 - 4 * x

id: 193 (mxnet: null, paddle: null)
tensorflow:
import tensorflow as tf
x = tf.range(4, dtype=tf.float32)
x = tf.Variable(x)
with tf.GradientTape() as t:
    y = 2 * tf.tensordot(x, x, axes=1)
x_grad = t.gradient(y, x)
x_grad
x_grad == 4 * x
with tf.GradientTape() as t:
    y = tf.reduce_sum(x)
t.gradient(y, x)
with tf.GradientTape() as t:
    y = x * x
t.gradient(y, x)
with tf.GradientTape(persistent=True) as t:
    y = x * x
    u = tf.stop_gradient(y)
    z = u * x
x_grad = t.gradient(z, x)
x_grad == u
t.gradient(y, x) == 2 * x
def f(a):
    b = a * 2
    while tf.norm(b) < 1000:
        b = b * 2
    if tf.reduce_sum(b) > 0:
        c = b
    else:
        c = 100 * b
    return c
a = tf.Variable(tf.random.normal(shape=()))
with tf.GradientTape() as t:
    d = f(a)
d_grad = t.gradient(d, a)
d_grad
d_grad == d / a

pytorch:
import torch
x = torch.arange(4.0)
x.requires_grad_(True)
x.grad
y = 2 * torch.dot(x, x)
y.backward()
x.grad
x.grad == 4 * x
x.grad.zero_()
y = x.sum()
y.backward()
x.grad
x.grad.zero_()
y = x * x
y.sum().backward()
x.grad
x.grad.zero_()
y = x * x
u = y.detach()
z = u * x
z.sum().backward()
x.grad == u
x.grad.zero_()
y.sum().backward()
x.grad == 2 * x
def f(a):
    b = a * 2
    while b.norm() < 1000:
        b = b * 2
    if b.sum() > 0:
        c = b
    else:
        c = 100 * b
    return c
a = torch.randn(size=(), requires_grad=True)
d = f(a)
d.backward()
a.grad == d / a

id: 194 (mxnet: null, paddle: null)
tensorflow:
%matplotlib inline
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from d2l import tensorflow as d2l
fair_probs = tf.ones(6) / 6
tfp.distributions.Multinomial(1, fair_probs).sample()
tfp.distributions.Multinomial(10, fair_probs).sample()
counts = tfp.distributions.Multinomial(1000, fair_probs).sample()

pytorch:
%matplotlib inline
import torch
from torch.distributions import multinomial
from d2l import torch as d2l
fair_probs = torch.ones([6]) / 6
multinomial.Multinomial(1, fair_probs).sample()
multinomial.Multinomial(10, fair_probs).sample()
counts = multinomial.Multinomial(1000, fair_probs).sample()

id: 195 (mxnet: null, paddle: null)
tensorflow:
counts = tfp.distributions.Multinomial(10, fair_probs).sample(500)
cum_counts = tf.cumsum(counts, axis=0)
estimates = cum_counts / tf.reduce_sum(cum_counts, axis=1, keepdims=True)
d2l.set_figsize((6, 4.5))
for i in range(6):
    d2l.plt.plot(estimates[:, i].numpy(), label=("P(die=" + str(i + 1) + ")"))
d2l.plt.axhline(y=0.167, color='black', linestyle='dashed')
d2l.plt.gca().set_xlabel('Groups of experiments')
d2l.plt.gca().set_ylabel('Estimated probability')
d2l.plt.legend();
import tensorflow as tf
a = dir(tf.random)
help(tf.ones)
tf.ones(4)

pytorch:
counts = multinomial.Multinomial(10, fair_probs).sample((500,))
cum_counts = counts.cumsum(dim=0)
estimates = cum_counts / cum_counts.sum(dim=1, keepdims=True)
d2l.set_figsize((6, 4.5))
for i in range(6):
    d2l.plt.plot(estimates[:, i].numpy(), label=("P(die=" + str(i + 1) + ")"))
d2l.plt.axhline(y=0.167, color='black', linestyle='dashed')
d2l.plt.gca().set_xlabel('Groups of experiments')
d2l.plt.gca().set_ylabel('Estimated probability')
d2l.plt.legend();
import torch
a = dir(torch.distributions)
help(torch.ones)
torch.ones(4)

id: 196 (mxnet: null, paddle: null)
tensorflow:
%matplotlib inline
import math
import time
import numpy as np
import tensorflow as tf
from d2l import tensorflow as d2l
n = 10000
a = tf.ones(n)
b = tf.ones(n)
c = tf.Variable(tf.zeros(n))
timer = Timer()
for i in range(n):
    c[i].assign(a[i] + b[i])

pytorch:
%matplotlib inline
import math
import time
import numpy as np
import torch
from d2l import torch as d2l
n = 10000
a = torch.ones(n)
b = torch.ones(n)
c = torch.zeros(n)
timer = Timer()
for i in range(n):
    c[i] = a[i] + b[i]

id: 197 (mxnet: null, paddle: null)
tensorflow:
%matplotlib inline
import random
import tensorflow as tf
from d2l import tensorflow as d2l
def synthetic_data(w, b, num_examples):
    X = tf.zeros((num_examples, w.shape[0]))
    X += tf.random.normal(shape=X.shape)
    y = tf.matmul(X, tf.reshape(w, (-1, 1))) + b
    y += tf.random.normal(shape=y.shape, stddev=0.01)
    y = tf.reshape(y, (-1, 1))
    return X, y
true_w = tf.constant([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
d2l.set_figsize()
d2l.plt.scatter(features[:, (1)].numpy(), labels.numpy(), 1);
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        j = tf.constant(indices[i: min(i + batch_size, num_examples)])
        yield tf.gather(features, j), tf.gather(labels, j)
w = tf.Variable(tf.random.normal(shape=(2, 1), mean=0, stddev=0.01), trainable=True)
b = tf.Variable(tf.zeros(1), trainable=True)
def linreg(X, w, b):
    return tf.matmul(X, w) + b
def squared_loss(y_hat, y):
    return (y_hat - tf.reshape(y, y_hat.shape)) ** 2 / 2
def sgd(params, grads, lr, batch_size):
    for param, grad in zip(params, grads):
        param.assign_sub(lr * grad / batch_size)
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        with tf.GradientTape() as g:
            l = loss(net(X, w, b), y)
        dw, db = g.gradient(l, [w, b])
        sgd([w, b], [dw, db], lr, batch_size)
    train_l = loss(net(features, w, b), labels)

pytorch:
%matplotlib inline
import random
import torch
from d2l import torch as d2l
def synthetic_data(w, b, num_examples):
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)
    return X, y.reshape((-1, 1))
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
d2l.set_figsize()
d2l.plt.scatter(features[:, (1)].detach().numpy(), labels.detach().numpy(), 1);
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]
w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
def linreg(X, w, b):
    return torch.matmul(X, w) + b
def squared_loss(y_hat, y):
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
def sgd(params, lr, batch_size):
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size
            param.grad.zero_()
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)
        l.sum().backward()
        sgd([w, b], lr, batch_size)
    with torch.no_grad():
        train_l = loss(net(features, w, b), labels)

id: 198 (mxnet: null, paddle: null)
tensorflow:
import numpy as np
import tensorflow as tf
from d2l import tensorflow as d2l
true_w = tf.constant([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)
def load_array(data_arrays, batch_size, is_train=True):
    dataset = tf.data.Dataset.from_tensor_slices(data_arrays)
    if is_train:
        dataset = dataset.shuffle(buffer_size=1000)
    dataset = dataset.batch(batch_size)
    return dataset
batch_size = 10
data_iter = load_array((features, labels), batch_size)
net = tf.keras.Sequential()
net.add(tf.keras.layers.Dense(1))
initializer = tf.initializers.RandomNormal(stddev=0.01)
net = tf.keras.Sequential()
net.add(tf.keras.layers.Dense(1, kernel_initializer=initializer))
loss = tf.keras.losses.MeanSquaredError()
trainer = tf.keras.optimizers.SGD(learning_rate=0.03)
w = net.get_weights()[0]
b = net.get_weights()[1]

pytorch:
import numpy as np
import torch
from torch.utils import data
from d2l import torch as d2l
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)
def load_array(data_arrays, batch_size, is_train=True):
    dataset = data.TensorDataset(*data_arrays)
    return data.DataLoader(dataset, batch_size, shuffle=is_train)
batch_size = 10
data_iter = load_array((features, labels), batch_size)
from torch import nn
net = nn.Sequential(nn.Linear(2, 1))
net[0].weight.data.normal_(0, 0.01)
net[0].bias.data.fill_(0)
loss = nn.MSELoss()
trainer = torch.optim.SGD(net.parameters(), lr=0.03)
w = net[0].weight.data
b = net[0].bias.data

id: 199 (mxnet: null, paddle: null)
tensorflow:
%matplotlib inline
import tensorflow as tf
from d2l import tensorflow as d2l
d2l.use_svg_display()
mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()
len(mnist_train[0]), len(mnist_test[0])
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for i, (ax, img) in enumerate(zip(axes, imgs)):
        ax.imshow(img.numpy())
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[i])
    return axes
X = tf.constant(mnist_train[0][:18])
y = tf.constant(mnist_train[1][:18])
show_images(X, 2, 9, titles=get_fashion_mnist_labels(y));
batch_size = 256
train_iter = tf.data.Dataset.from_tensor_slices(mnist_train).batch(batch_size).shuffle(len(mnist_train[0]))
def load_data_fashion_mnist(batch_size, resize=None):
    mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()
    process = lambda X, y: (tf.expand_dims(X, axis=3) / 255, tf.cast(y, dtype='int32'))
    resize_fn = lambda X, y: (tf.image.resize_with_pad(X, resize, resize) if resize else X, y)
    return (tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(batch_size).shuffle(len(mnist_train[0])).map(resize_fn),
            tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(batch_size).map(resize_fn))

pytorch:
%matplotlib inline
import torch
import torchvision
from torch.utils import data
from torchvision import transforms
from d2l import torch as d2l
d2l.use_svg_display()
trans = transforms.ToTensor()
mnist_train = torchvision.datasets.FashionMNIST(
    root="../data", train=True, transform=trans, download=True)
mnist_test = torchvision.datasets.FashionMNIST(
    root="../data", train=False, transform=trans, download=True)
len(mnist_train), len(mnist_test)
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for i, (ax, img) in enumerate(zip(axes, imgs)):
        if torch.is_tensor(img):
            ax.imshow(img.numpy())
        else:
            ax.imshow(img)
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[i])
    return axes
X, y = next(iter(data.DataLoader(mnist_train, batch_size=18)))
show_images(X.reshape(18, 28, 28), 2, 9, titles=get_fashion_mnist_labels(y));
batch_size = 256
def get_dataloader_workers():
    return 4
train_iter = data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers())
def load_data_fashion_mnist(batch_size, resize=None):
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))
    trans = transforms.Compose(trans)
    mnist_train = torchvision.datasets.FashionMNIST(root="../data", train=True, transform=trans, download=True)
    mnist_test = torchvision.datasets.FashionMNIST(root="../data", train=False, transform=trans, download=True)
    return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),
            data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))

id: 200 (mxnet: null, paddle: null)
tensorflow:
import tensorflow as tf
from IPython import display
from d2l import tensorflow as d2l
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs = 784
num_outputs = 10
W = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs), mean=0, stddev=0.01))
b = tf.Variable(tf.zeros(num_outputs))
X = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
tf.reduce_sum(X, 0, keepdims=True), tf.reduce_sum(X, 1, keepdims=True)
def softmax(X):
    X_exp = tf.exp(X)
    partition = tf.reduce_sum(X_exp, 1, keepdims=True)
    return X_exp / partition
X = tf.random.normal((2, 5), 0, 1)
X_prob = softmax(X)
X_prob, tf.reduce_sum(X_prob, 1)
def net(X):
    return softmax(tf.matmul(tf.reshape(X, (-1, W.shape[0])), W) + b)
y_hat = tf.constant([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
y = tf.constant([0, 2])
tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1]))
def cross_entropy(y_hat, y):
    return -tf.math.log(tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1])))
cross_entropy(y_hat, y)
def accuracy(y_hat, y):
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = tf.argmax(y_hat, axis=1)
    cmp = tf.cast(y_hat, y.dtype) == y
    return float(tf.reduce_sum(tf.cast(cmp, y.dtype)))
def evaluate_accuracy(net, data_iter):
    metric = Accumulator(2)
    for X, y in data_iter:
        metric.add(accuracy(net(X), y), d2l.size(y))
    return metric[0] / metric[1]
def train_epoch_ch3(net, train_iter, loss, updater):
    metric = Accumulator(3)
    for X, y in train_iter:
        with tf.GradientTape() as tape:
            y_hat = net(X)
            if isinstance(loss, tf.keras.losses.Loss):
                l = loss(y, y_hat)
            else:
                l = loss(y_hat, y)
        if isinstance(updater, tf.keras.optimizers.Optimizer):
            params = net.trainable_variables
            grads = tape.gradient(l, params)
            updater.apply_gradients(zip(grads, params))
        else:
            updater(X.shape[0], tape.gradient(l, updater.params))
        l_sum = l * float(tf.size(y)) if isinstance(loss, tf.keras.losses.Loss) else tf.reduce_sum(l)
        metric.add(l_sum, accuracy(y_hat, y), tf.size(y))
    return metric[0] / metric[2], metric[1] / metric[2]
class Updater():
    def __init__(self, params, lr):
        self.params = params
        self.lr = lr
    def __call__(self, batch_size, grads):
        d2l.sgd(self.params, grads, self.lr, batch_size)
updater = Updater([W, b], lr=0.1)
def predict_ch3(net, test_iter, n=6):
    for X, y in test_iter:
        break
    trues = d2l.get_fashion_mnist_labels(y)
    preds = d2l.get_fashion_mnist_labels(tf.argmax(net(X), axis=1))
    titles = [true + '\n' + pred for true, pred in zip(trues, preds)]
    d2l.show_images(tf.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n])
predict_ch3(net, test_iter)

pytorch:
import torch
from IPython import display
from d2l import torch as d2l
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs = 784
num_outputs = 10
W = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True)
b = torch.zeros(num_outputs, requires_grad=True)
X = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
X.sum(0, keepdim=True), X.sum(1, keepdim=True)
def softmax(X):
    X_exp = torch.exp(X)
    partition = X_exp.sum(1, keepdim=True)
    return X_exp / partition
X = torch.normal(0, 1, (2, 5))
X_prob = softmax(X)
X_prob, X_prob.sum(1)
def net(X):
    return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b)
y = torch.tensor([0, 2])
y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
y_hat[[0, 1], y]
def cross_entropy(y_hat, y):
    return - torch.log(y_hat[range(len(y_hat)), y])
cross_entropy(y_hat, y)
def accuracy(y_hat, y):
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = y_hat.argmax(axis=1)
    cmp = y_hat.type(y.dtype) == y
    return float(cmp.type(y.dtype).sum())
def evaluate_accuracy(net, data_iter):
    if isinstance(net, torch.nn.Module):
        net.eval()
    metric = Accumulator(2)
    with torch.no_grad():
        for X, y in data_iter:
            metric.add(accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]
def train_epoch_ch3(net, train_iter, loss, updater):
    if isinstance(net, torch.nn.Module):
        net.train()
    metric = Accumulator(3)
    for X, y in train_iter:
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.mean().backward()
            updater.step()
        else:
            l.sum().backward()
            updater(X.shape[0])
        metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    return metric[0] / metric[2], metric[1] / metric[2]
lr = 0.1
def updater(batch_size):
    return d2l.sgd([W, b], lr, batch_size)
def predict_ch3(net, test_iter, n=6):
    for X, y in test_iter:
        break
    trues = d2l.get_fashion_mnist_labels(y)
    preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))
    titles = [true + '\n' + pred for true, pred in zip(trues, preds)]
    d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])
predict_ch3(net, test_iter)

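The records above were recovered from the viewer's embedded JSON payload; the pagination metadata and session token that trailed the rows carried no dataset content and were dropped. As a minimal sketch of how such a payload can be unpacked programmatically: the file name page_state.json and the top-level rows key are assumptions about how the blob was saved, while the rowIdx/cells/value shape is exactly what appeared above.

import json

# Assumption: the embedded viewer state was saved verbatim to this file.
with open("page_state.json") as f:
    state = json.load(f)

# Each entry looks like:
#   {"rowIdx": 180, "cells": {"pytorch": {"kind": "string", "value": "import ..."}, ...}}
# json.load has already turned the \n escapes inside "value" into real newlines.
for entry in state["rows"]:
    for column in ("tensorflow", "pytorch", "mxnet", "paddle"):
        cell = entry["cells"].get(column, {})
        if cell.get("kind") == "string":
            print(f"id {entry['cells']['id']['value']}, {column}:")
            print(cell["value"])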
Columns (564 rows in total; a null cell means no snippet is available for that framework):
id: int64, values 1 to 564
tensorflow: string, 52 values
pytorch: string, 81 values
mxnet: string, 66 values
paddle: string, 73 values
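Given this schema, a minimal sketch of iterating the table with the Hugging Face datasets library; the repository id OWNER/CodeTransOcean and the split name train are placeholders, since the actual hosting location is not stated on this page.

from datasets import load_dataset

# Placeholder repo id; substitute wherever this table is actually hosted.
ds = load_dataset("OWNER/CodeTransOcean", split="train")

# Tally non-null snippets per framework column, mirroring the counts above.
for column in ("tensorflow", "pytorch", "mxnet", "paddle"):
    print(column, sum(row[column] is not None for row in ds))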
id: 101 (tensorflow: null, paddle: null)
pytorch:
counts = multinomial.Multinomial(10, fair_probs).sample((500,))
cum_counts = counts.cumsum(dim=0)
estimates = cum_counts / cum_counts.sum(dim=1, keepdims=True)
d2l.set_figsize((6, 4.5))
for i in range(6):
    d2l.plt.plot(estimates[:, i].numpy(), label=("P(die=" + str(i + 1) + ")"))
d2l.plt.axhline(y=0.167, color='black', linestyle='dashed')
d2l.plt.gca().set_xlabel('Groups of experiments')
d2l.plt.gca().set_ylabel('Estimated probability')
d2l.plt.legend();
import torch
a = dir(torch.distributions)
help(torch.ones)
torch.ones(4)

mxnet:
counts = np.random.multinomial(10, fair_probs, size=500)
cum_counts = counts.astype(np.float32).cumsum(axis=0)
estimates = cum_counts / cum_counts.sum(axis=1, keepdims=True)
d2l.set_figsize((6, 4.5))
for i in range(6):
    d2l.plt.plot(estimates[:, i].asnumpy(), label=("P(die=" + str(i + 1) + ")"))
d2l.plt.axhline(y=0.167, color='black', linestyle='dashed')
d2l.plt.gca().set_xlabel('Groups of experiments')
d2l.plt.gca().set_ylabel('Estimated probability')
d2l.plt.legend();
from mxnet import np
a = dir(np.random)
help(np.ones)
np.ones(4)

id: 102 (tensorflow: null, paddle: null)
pytorch:
%matplotlib inline
import math
import time
import numpy as np
import torch
from d2l import torch as d2l
n = 10000
a = torch.ones(n)
b = torch.ones(n)
c = torch.zeros(n)
timer = Timer()
for i in range(n):
    c[i] = a[i] + b[i]
x = np.arange(-7, 7, 0.01)
params = [(0, 1), (0, 2), (3, 1)]
d2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])

mxnet:
%matplotlib inline
import math
import time
from mxnet import np
from d2l import mxnet as d2l
n = 10000
a = np.ones(n)
b = np.ones(n)
c = np.zeros(n)
timer = Timer()
for i in range(n):
    c[i] = a[i] + b[i]
x = np.arange(-7, 7, 0.01)
params = [(0, 1), (0, 2), (3, 1)]
d2l.plot(x.asnumpy(), [normal(x, mu, sigma).asnumpy() for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])

id: 103 (tensorflow: null, paddle: null)
pytorch:
%matplotlib inline
import random
import torch
from d2l import torch as d2l
def synthetic_data(w, b, num_examples):
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)
    return X, y.reshape((-1, 1))
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
d2l.set_figsize()
d2l.plt.scatter(features[:, (1)].detach().numpy(), labels.detach().numpy(), 1);
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]
batch_size = 10
w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
def linreg(X, w, b):
    return torch.matmul(X, w) + b
def squared_loss(y_hat, y):
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
def sgd(params, lr, batch_size):
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size
            param.grad.zero_()
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)
        l.sum().backward()
        sgd([w, b], lr, batch_size)
    with torch.no_grad():
        train_l = loss(net(features, w, b), labels)

mxnet:
%matplotlib inline
import random
from mxnet import autograd, np, npx
from d2l import mxnet as d2l
npx.set_np()
def synthetic_data(w, b, num_examples):
    X = np.random.normal(0, 1, (num_examples, len(w)))
    y = np.dot(X, w) + b
    y += np.random.normal(0, 0.01, y.shape)
    return X, y.reshape((-1, 1))
true_w = np.array([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
d2l.set_figsize()
d2l.plt.scatter(features[:, (1)].asnumpy(), labels.asnumpy(), 1);
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        batch_indices = np.array(indices[i: min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]
batch_size = 10
w = np.random.normal(0, 0.01, (2, 1))
b = np.zeros(1)
w.attach_grad()
b.attach_grad()
def linreg(X, w, b):
    return np.dot(X, w) + b
def squared_loss(y_hat, y):
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
def sgd(params, lr, batch_size):
    for param in params:
        param[:] = param - lr * param.grad / batch_size
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        with autograd.record():
            l = loss(net(X, w, b), y)
        l.backward()
        sgd([w, b], lr, batch_size)
    train_l = loss(net(features, w, b), labels)

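Both scratch implementations in row 103 perform the same minibatch stochastic gradient descent step; restated as an equation (this paraphrases what the sgd helpers above compute, with learning rate $\eta$ = lr and minibatch $\mathcal{B}$):

$$(\mathbf{w}, b) \leftarrow (\mathbf{w}, b) - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \nabla_{(\mathbf{w}, b)}\, l^{(i)}(\mathbf{w}, b), \qquad l^{(i)}(\mathbf{w}, b) = \tfrac{1}{2}\bigl(\mathbf{w}^\top \mathbf{x}^{(i)} + b - y^{(i)}\bigr)^2$$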
id: 104 (tensorflow: null, paddle: null)
pytorch:
import numpy as np
import torch
from torch.utils import data
from d2l import torch as d2l
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)
def load_array(data_arrays, batch_size, is_train=True):
    dataset = data.TensorDataset(*data_arrays)
    return data.DataLoader(dataset, batch_size, shuffle=is_train)
batch_size = 10
data_iter = load_array((features, labels), batch_size)
from torch import nn
net = nn.Sequential(nn.Linear(2, 1))
net[0].weight.data.normal_(0, 0.01)
net[0].bias.data.fill_(0)
loss = nn.MSELoss()
trainer = torch.optim.SGD(net.parameters(), lr=0.03)
w = net[0].weight.data
b = net[0].bias.data

mxnet:
from mxnet import autograd, gluon, np, npx
from d2l import mxnet as d2l
npx.set_np()
true_w = np.array([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)
def load_array(data_arrays, batch_size, is_train=True):
    dataset = gluon.data.ArrayDataset(*data_arrays)
    return gluon.data.DataLoader(dataset, batch_size, shuffle=is_train)
batch_size = 10
data_iter = load_array((features, labels), batch_size)
from mxnet.gluon import nn
net = nn.Sequential()
net.add(nn.Dense(1))
from mxnet import init
net.initialize(init.Normal(sigma=0.01))
loss = gluon.loss.L2Loss()
from mxnet import gluon
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})
w = net[0].weight.data()
b = net[0].bias.data()

id: 105 (tensorflow: null, paddle: null)
pytorch:
%matplotlib inline
import torch
import torchvision
from torch.utils import data
from torchvision import transforms
from d2l import torch as d2l
d2l.use_svg_display()
trans = transforms.ToTensor()
mnist_train = torchvision.datasets.FashionMNIST(
    root="../data", train=True, transform=trans, download=True)
mnist_test = torchvision.datasets.FashionMNIST(
    root="../data", train=False, transform=trans, download=True)
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for i, (ax, img) in enumerate(zip(axes, imgs)):
        if torch.is_tensor(img):
            ax.imshow(img.numpy())
        else:
            ax.imshow(img)
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[i])
    return axes
X, y = next(iter(data.DataLoader(mnist_train, batch_size=18)))
show_images(X.reshape(18, 28, 28), 2, 9, titles=get_fashion_mnist_labels(y));
batch_size = 256
def get_dataloader_workers():
    return 4
train_iter = data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers())
def load_data_fashion_mnist(batch_size, resize=None):
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))
    trans = transforms.Compose(trans)
    mnist_train = torchvision.datasets.FashionMNIST(root="../data", train=True, transform=trans, download=True)
    mnist_test = torchvision.datasets.FashionMNIST(root="../data", train=False, transform=trans, download=True)
    return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),
            data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))

mxnet:
%matplotlib inline
import sys
from mxnet import gluon
from d2l import mxnet as d2l
d2l.use_svg_display()
mnist_train = gluon.data.vision.FashionMNIST(train=True)
mnist_test = gluon.data.vision.FashionMNIST(train=False)
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for i, (ax, img) in enumerate(zip(axes, imgs)):
        ax.imshow(img.asnumpy())
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[i])
    return axes
X, y = mnist_train[:18]
show_images(X.squeeze(axis=-1), 2, 9, titles=get_fashion_mnist_labels(y));
batch_size = 256
def get_dataloader_workers():
    return 0 if sys.platform.startswith('win') else 4
transformer = gluon.data.vision.transforms.ToTensor()
train_iter = gluon.data.DataLoader(mnist_train.transform_first(transformer), batch_size, shuffle=True, num_workers=get_dataloader_workers())
def load_data_fashion_mnist(batch_size, resize=None):
    dataset = gluon.data.vision
    trans = [dataset.transforms.ToTensor()]
    if resize:
        trans.insert(0, dataset.transforms.Resize(resize))
    trans = dataset.transforms.Compose(trans)
    mnist_train = dataset.FashionMNIST(train=True).transform_first(trans)
    mnist_test = dataset.FashionMNIST(train=False).transform_first(trans)
    return (gluon.data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),
            gluon.data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))

106
null
import torch from IPython import display from d2l import torch as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True) b = torch.zeros(num_outputs, requires_grad=True) X = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) X.sum(0, keepdim=True), X.sum(1, keepdim=True) def softmax(X): X_exp = torch.exp(X) partition = X_exp.sum(1, keepdim=True) return X_exp / partition X = torch.normal(0, 1, (2, 5)) X_prob = softmax(X) X_prob, X_prob.sum(1) def net(X): return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b) y = torch.tensor([0, 2]) y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]]) y_hat[[0, 1], y] def cross_entropy(y_hat, y): return - torch.log(y_hat[range(len(y_hat)), y]) cross_entropy(y_hat, y) def accuracy(y_hat, y): if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: y_hat = y_hat.argmax(axis=1) cmp = y_hat.type(y.dtype) == y return float(cmp.type(y.dtype).sum()) def evaluate_accuracy(net, data_iter): if isinstance(net, torch.nn.Module): net.eval() metric = Accumulator(2) with torch.no_grad(): for X, y in data_iter: metric.add(accuracy(net(X), y), y.numel()) return metric[0] / metric[1] def train_epoch_ch3(net, train_iter, loss, updater): if isinstance(net, torch.nn.Module): net.train() metric = Accumulator(3) for X, y in train_iter: y_hat = net(X) l = loss(y_hat, y) if isinstance(updater, torch.optim.Optimizer): updater.zero_grad() l.mean().backward() updater.step() else: l.sum().backward() updater(X.shape[0]) metric.add(float(l.sum()), accuracy(y_hat, y), y.numel()) return metric[0] / metric[2], metric[1] / metric[2]
from IPython import display from mxnet import autograd, gluon, np, npx from d2l import mxnet as d2l npx.set_np() batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = np.random.normal(0, 0.01, (num_inputs, num_outputs)) b = np.zeros(num_outputs) W.attach_grad() b.attach_grad() X = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) X.sum(0, keepdims=True), X.sum(1, keepdims=True) def softmax(X): X_exp = np.exp(X) partition = X_exp.sum(1, keepdims=True) return X_exp / partition X = np.random.normal(0, 1, (2, 5)) X_prob = softmax(X) X_prob, X_prob.sum(1) def net(X): return softmax(np.dot(X.reshape((-1, W.shape[0])), W) + b) y = np.array([0, 2]) y_hat = np.array([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]]) y_hat[[0, 1], y] def cross_entropy(y_hat, y): return - np.log(y_hat[range(len(y_hat)), y]) cross_entropy(y_hat, y) def accuracy(y_hat, y): if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: y_hat = y_hat.argmax(axis=1) cmp = y_hat.astype(y.dtype) == y return float(cmp.astype(y.dtype).sum()) def evaluate_accuracy(net, data_iter): metric = Accumulator(2) for X, y in data_iter: metric.add(accuracy(net(X), y), d2l.size(y)) return metric[0] / metric[1] def train_epoch_ch3(net, train_iter, loss, updater): metric = Accumulator(3) if isinstance(updater, gluon.Trainer): updater = updater.step for X, y in train_iter: with autograd.record(): y_hat = net(X) l = loss(y_hat, y) l.backward() updater(X.shape[0]) metric.add(float(l.sum()), accuracy(y_hat, y), y.size) return metric[0] / metric[2], metric[1] / metric[2]
import torch
from torch import nn
from d2l import torch as d2l

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))

def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)

net.apply(init_weights);

loss = nn.CrossEntropyLoss(reduction='none')
trainer = torch.optim.SGD(net.parameters(), lr=0.1)
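One point the concise version leaves implicit: nn.CrossEntropyLoss applies log-softmax internally, so the network outputs raw logits rather than probabilities. A minimal check (my addition):

import torch
from torch import nn

logits = torch.tensor([[2.0, 1.0, 0.1]])
y = torch.tensor([0])
manual = -torch.log_softmax(logits, dim=1)[0, 0]
print(torch.allclose(nn.CrossEntropyLoss()(logits, y), manual))  # True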
from mxnet import gluon, init, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

net = nn.Sequential()
net.add(nn.Dense(10))
net.initialize(init.Normal(sigma=0.01))

loss = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
%matplotlib inline
import torch
from d2l import torch as d2l

x = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)
y = torch.relu(x)
d2l.plot(x.detach(), y.detach(), 'x', 'relu(x)', figsize=(5, 2.5))

y.backward(torch.ones_like(x), retain_graph=True)
d2l.plot(x.detach(), x.grad, 'x', 'grad of relu', figsize=(5, 2.5))

y = torch.sigmoid(x)
d2l.plot(x.detach(), y.detach(), 'x', 'sigmoid(x)', figsize=(5, 2.5))

x.grad.data.zero_()
y.backward(torch.ones_like(x), retain_graph=True)
d2l.plot(x.detach(), x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))

y = torch.tanh(x)
d2l.plot(x.detach(), y.detach(), 'x', 'tanh(x)', figsize=(5, 2.5))

x.grad.data.zero_()
y.backward(torch.ones_like(x), retain_graph=True)
d2l.plot(x.detach(), x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))
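The autograd results above can be cross-checked against the closed form sigmoid'(x) = sigmoid(x)(1 - sigmoid(x)); a minimal sketch (my addition):

import torch

x = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)
y = torch.sigmoid(x)
y.backward(torch.ones_like(x))
analytic = (torch.sigmoid(x) * (1 - torch.sigmoid(x))).detach()
print(torch.allclose(x.grad, analytic))  # True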
%matplotlib inline
from mxnet import autograd, np, npx
from d2l import mxnet as d2l

npx.set_np()

x = np.arange(-8.0, 8.0, 0.1)
x.attach_grad()
with autograd.record():
    y = npx.relu(x)
d2l.plot(x, y, 'x', 'relu(x)', figsize=(5, 2.5))

y.backward()
d2l.plot(x, x.grad, 'x', 'grad of relu', figsize=(5, 2.5))

with autograd.record():
    y = npx.sigmoid(x)
d2l.plot(x, y, 'x', 'sigmoid(x)', figsize=(5, 2.5))

y.backward()
d2l.plot(x, x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))

with autograd.record():
    y = np.tanh(x)
d2l.plot(x, y, 'x', 'tanh(x)', figsize=(5, 2.5))

y.backward()
d2l.plot(x, x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))
import torch
from torch import nn
from d2l import torch as d2l

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

num_inputs, num_outputs, num_hiddens = 784, 10, 256

W1 = nn.Parameter(torch.randn(num_inputs, num_hiddens,
                              requires_grad=True) * 0.01)
b1 = nn.Parameter(torch.zeros(num_hiddens, requires_grad=True))
W2 = nn.Parameter(torch.randn(num_hiddens, num_outputs,
                              requires_grad=True) * 0.01)
b2 = nn.Parameter(torch.zeros(num_outputs, requires_grad=True))
params = [W1, b1, W2, b2]

def relu(X):
    a = torch.zeros_like(X)
    return torch.max(X, a)

def net(X):
    X = X.reshape((-1, num_inputs))
    H = relu(X @ W1 + b1)
    return H @ W2 + b2

loss = nn.CrossEntropyLoss(reduction='none')

num_epochs, lr = 10, 0.1
updater = torch.optim.SGD(params, lr=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
from mxnet import gluon, np, npx
from d2l import mxnet as d2l

npx.set_np()

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

num_inputs, num_outputs, num_hiddens = 784, 10, 256

W1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens))
b1 = np.zeros(num_hiddens)
W2 = np.random.normal(scale=0.01, size=(num_hiddens, num_outputs))
b2 = np.zeros(num_outputs)
params = [W1, b1, W2, b2]

for param in params:
    param.attach_grad()

def relu(X):
    return np.maximum(X, 0)

def net(X):
    X = X.reshape((-1, num_inputs))
    H = relu(np.dot(X, W1) + b1)
    return np.dot(H, W2) + b2

loss = gluon.loss.SoftmaxCrossEntropyLoss()

num_epochs, lr = 10, 0.1
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs,
              lambda batch_size: d2l.sgd(params, lr, batch_size))
import torch
from torch import nn
from d2l import torch as d2l

net = nn.Sequential(nn.Flatten(),
                    nn.Linear(784, 256),
                    nn.ReLU(),
                    nn.Linear(256, 10))

def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)

net.apply(init_weights);

batch_size, lr, num_epochs = 256, 0.1, 10
loss = nn.CrossEntropyLoss(reduction='none')
trainer = torch.optim.SGD(net.parameters(), lr=lr)

train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
from mxnet import gluon, init, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()

net = nn.Sequential()
net.add(nn.Dense(256, activation='relu'),
        nn.Dense(10))
net.initialize(init.Normal(sigma=0.01))

batch_size, lr, num_epochs = 256, 0.1, 10
loss = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})

train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
import math
import numpy as np
import torch
from torch import nn
from d2l import torch as d2l

# `true_w`, `features`, `poly_features`, and `labels` come from the data
# generation step earlier in the source.
true_w, features, poly_features, labels = [
    torch.tensor(x, dtype=torch.float32)
    for x in [true_w, features, poly_features, labels]]
features[:2], poly_features[:2, :], labels[:2]

def evaluate_loss(net, data_iter, loss):
    metric = d2l.Accumulator(2)
    for X, y in data_iter:
        out = net(X)
        y = y.reshape(out.shape)
        l = loss(out, y)
        metric.add(l.sum(), l.numel())
    return metric[0] / metric[1]

def train(train_features, test_features, train_labels, test_labels,
          num_epochs=400):
    loss = nn.MSELoss(reduction='none')
    input_shape = train_features.shape[-1]
    net = nn.Sequential(nn.Linear(input_shape, 1, bias=False))
    batch_size = min(10, train_labels.shape[0])
    train_iter = d2l.load_array((train_features, train_labels.reshape(-1, 1)),
                                batch_size)
    test_iter = d2l.load_array((test_features, test_labels.reshape(-1, 1)),
                               batch_size, is_train=False)
    trainer = torch.optim.SGD(net.parameters(), lr=0.01)
    animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log',
                            xlim=[1, num_epochs], ylim=[1e-3, 1e2],
                            legend=['train', 'test'])
    for epoch in range(num_epochs):
        d2l.train_epoch_ch3(net, train_iter, loss, trainer)
        if epoch == 0 or (epoch + 1) % 20 == 0:
            animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss),
                                     evaluate_loss(net, test_iter, loss)))
import math
from mxnet import gluon, np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()

# `features`, `poly_features`, and `labels` come from the data generation
# step earlier in the source.
features[:2], poly_features[:2, :], labels[:2]

def evaluate_loss(net, data_iter, loss):
    metric = d2l.Accumulator(2)
    for X, y in data_iter:
        l = loss(net(X), y)
        metric.add(l.sum(), d2l.size(l))
    return metric[0] / metric[1]

def train(train_features, test_features, train_labels, test_labels,
          num_epochs=400):
    loss = gluon.loss.L2Loss()
    net = nn.Sequential()
    net.add(nn.Dense(1, use_bias=False))
    net.initialize()
    batch_size = min(10, train_labels.shape[0])
    train_iter = d2l.load_array((train_features, train_labels), batch_size)
    test_iter = d2l.load_array((test_features, test_labels), batch_size,
                               is_train=False)
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': 0.01})
    animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log',
                            xlim=[1, num_epochs], ylim=[1e-3, 1e2],
                            legend=['train', 'test'])
    for epoch in range(num_epochs):
        d2l.train_epoch_ch3(net, train_iter, loss, trainer)
        if epoch == 0 or (epoch + 1) % 20 == 0:
            animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss),
                                     evaluate_loss(net, test_iter, loss)))
%matplotlib inline
import torch
from torch import nn
from d2l import torch as d2l

n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5
true_w, true_b = torch.ones((num_inputs, 1)) * 0.01, 0.05
train_data = d2l.synthetic_data(true_w, true_b, n_train)
train_iter = d2l.load_array(train_data, batch_size)
test_data = d2l.synthetic_data(true_w, true_b, n_test)
test_iter = d2l.load_array(test_data, batch_size, is_train=False)

def init_params():
    w = torch.normal(0, 1, size=(num_inputs, 1), requires_grad=True)
    b = torch.zeros(1, requires_grad=True)
    return [w, b]

def l2_penalty(w):
    return torch.sum(w.pow(2)) / 2

def train(lambd):
    w, b = init_params()
    net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss
    num_epochs, lr = 100, 0.003
    animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log',
                            xlim=[5, num_epochs], legend=['train', 'test'])
    for epoch in range(num_epochs):
        for X, y in train_iter:
            l = loss(net(X), y) + lambd * l2_penalty(w)
            l.sum().backward()
            d2l.sgd([w, b], lr, batch_size)
        if (epoch + 1) % 5 == 0:
            animator.add(epoch + 1,
                         (d2l.evaluate_loss(net, train_iter, loss),
                          d2l.evaluate_loss(net, test_iter, loss)))

def train_concise(wd):
    net = nn.Sequential(nn.Linear(num_inputs, 1))
    for param in net.parameters():
        param.data.normal_()
    loss = nn.MSELoss(reduction='none')
    num_epochs, lr = 100, 0.003
    trainer = torch.optim.SGD([
        {"params": net[0].weight, 'weight_decay': wd},
        {"params": net[0].bias}], lr=lr)
    animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log',
                            xlim=[5, num_epochs], legend=['train', 'test'])
    for epoch in range(num_epochs):
        for X, y in train_iter:
            trainer.zero_grad()
            l = loss(net(X), y)
            l.mean().backward()
            trainer.step()
        if (epoch + 1) % 5 == 0:
            animator.add(epoch + 1,
                         (d2l.evaluate_loss(net, train_iter, loss),
                          d2l.evaluate_loss(net, test_iter, loss)))
%matplotlib inline
from mxnet import autograd, gluon, init, np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()

n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5
true_w, true_b = np.ones((num_inputs, 1)) * 0.01, 0.05
train_data = d2l.synthetic_data(true_w, true_b, n_train)
train_iter = d2l.load_array(train_data, batch_size)
test_data = d2l.synthetic_data(true_w, true_b, n_test)
test_iter = d2l.load_array(test_data, batch_size, is_train=False)

def init_params():
    w = np.random.normal(scale=1, size=(num_inputs, 1))
    b = np.zeros(1)
    w.attach_grad()
    b.attach_grad()
    return [w, b]

def l2_penalty(w):
    return (w**2).sum() / 2

def train(lambd):
    w, b = init_params()
    net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss
    num_epochs, lr = 100, 0.003
    animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log',
                            xlim=[5, num_epochs], legend=['train', 'test'])
    for epoch in range(num_epochs):
        for X, y in train_iter:
            with autograd.record():
                l = loss(net(X), y) + lambd * l2_penalty(w)
            l.backward()
            d2l.sgd([w, b], lr, batch_size)
        if (epoch + 1) % 5 == 0:
            animator.add(epoch + 1,
                         (d2l.evaluate_loss(net, train_iter, loss),
                          d2l.evaluate_loss(net, test_iter, loss)))

def train_concise(wd):
    net = nn.Sequential()
    net.add(nn.Dense(1))
    net.initialize(init.Normal(sigma=1))
    loss = gluon.loss.L2Loss()
    num_epochs, lr = 100, 0.003
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': lr, 'wd': wd})
    net.collect_params('.*bias').setattr('wd_mult', 0)
    animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log',
                            xlim=[5, num_epochs], legend=['train', 'test'])
    for epoch in range(num_epochs):
        for X, y in train_iter:
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            trainer.step(batch_size)
        if (epoch + 1) % 5 == 0:
            animator.add(epoch + 1,
                         (d2l.evaluate_loss(net, train_iter, loss),
                          d2l.evaluate_loss(net, test_iter, loss)))
import torch
from torch import nn
from d2l import torch as d2l

def dropout_layer(X, dropout):
    assert 0 <= dropout <= 1
    if dropout == 1:
        return torch.zeros_like(X)
    if dropout == 0:
        return X
    mask = (torch.rand(X.shape) > dropout).float()
    return mask * X / (1.0 - dropout)  # rescale to preserve the expectation

X = torch.arange(16, dtype=torch.float32).reshape((2, 8))

num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256
dropout1, dropout2 = 0.2, 0.5

class Net(nn.Module):
    def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,
                 is_training=True):
        super(Net, self).__init__()
        self.num_inputs = num_inputs
        self.training = is_training
        self.lin1 = nn.Linear(num_inputs, num_hiddens1)
        self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)
        self.lin3 = nn.Linear(num_hiddens2, num_outputs)
        self.relu = nn.ReLU()

    def forward(self, X):
        H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))
        if self.training == True:
            H1 = dropout_layer(H1, dropout1)
        H2 = self.relu(self.lin2(H1))
        if self.training == True:
            H2 = dropout_layer(H2, dropout2)
        out = self.lin3(H2)
        return out

net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)

num_epochs, lr, batch_size = 10, 0.5, 256
loss = nn.CrossEntropyLoss(reduction='none')
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
trainer = torch.optim.SGD(net.parameters(), lr=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)

net = nn.Sequential(nn.Flatten(),
                    nn.Linear(784, 256), nn.ReLU(), nn.Dropout(dropout1),
                    nn.Linear(256, 256), nn.ReLU(), nn.Dropout(dropout2),
                    nn.Linear(256, 10))

def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)

net.apply(init_weights);

trainer = torch.optim.SGD(net.parameters(), lr=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
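Why the 1/(1 - dropout) rescaling: it keeps the expected value of each activation unchanged. A minimal check (my addition):

import torch

p = 0.5
X = torch.ones(100000)
mask = (torch.rand(X.shape) > p).float()
print((mask * X / (1 - p)).mean())  # close to 1.0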
from mxnet import autograd, gluon, init, np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()

def dropout_layer(X, dropout):
    assert 0 <= dropout <= 1
    if dropout == 1:
        return np.zeros_like(X)
    if dropout == 0:
        return X
    mask = np.random.uniform(0, 1, X.shape) > dropout
    return mask.astype(np.float32) * X / (1.0 - dropout)

X = np.arange(16).reshape(2, 8)

num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256

W1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens1))
b1 = np.zeros(num_hiddens1)
W2 = np.random.normal(scale=0.01, size=(num_hiddens1, num_hiddens2))
b2 = np.zeros(num_hiddens2)
W3 = np.random.normal(scale=0.01, size=(num_hiddens2, num_outputs))
b3 = np.zeros(num_outputs)
params = [W1, b1, W2, b2, W3, b3]

for param in params:
    param.attach_grad()

dropout1, dropout2 = 0.2, 0.5

def net(X):
    X = X.reshape(-1, num_inputs)
    H1 = npx.relu(np.dot(X, W1) + b1)
    if autograd.is_training():  # apply dropout only during training
        H1 = dropout_layer(H1, dropout1)
    H2 = npx.relu(np.dot(H1, W2) + b2)
    if autograd.is_training():
        H2 = dropout_layer(H2, dropout2)
    return np.dot(H2, W3) + b3

num_epochs, lr, batch_size = 10, 0.5, 256
loss = gluon.loss.SoftmaxCrossEntropyLoss()
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs,
              lambda batch_size: d2l.sgd(params, lr, batch_size))

net = nn.Sequential()
net.add(nn.Dense(256, activation="relu"),
        nn.Dropout(dropout1),
        nn.Dense(256, activation="relu"),
        nn.Dropout(dropout2),
        nn.Dense(10))
net.initialize(init.Normal(sigma=0.01))

trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
%matplotlib inline
import torch
from d2l import torch as d2l

x = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)
y = torch.sigmoid(x)
y.backward(torch.ones_like(x))
d2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],
         legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))

# Repeated multiplication of random matrices illustrates exploding values.
M = torch.normal(0, 1, size=(4, 4))
for i in range(100):
    M = torch.mm(M, torch.normal(0, 1, size=(4, 4)))
%matplotlib inline
from mxnet import autograd, np, npx
from d2l import mxnet as d2l

npx.set_np()

x = np.arange(-8.0, 8.0, 0.1)
x.attach_grad()
with autograd.record():
    y = npx.sigmoid(x)
y.backward()
d2l.plot(x, [y, x.grad], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))

# Repeated multiplication of random matrices illustrates exploding values.
M = np.random.normal(size=(4, 4))
for i in range(100):
    M = np.dot(M, np.random.normal(size=(4, 4)))
%matplotlib inline
import numpy as np
import pandas as pd
import torch
from torch import nn
from d2l import torch as d2l

# `train_data`, `test_data`, and `all_features` come from the pandas
# download/preprocessing step earlier in the source.
n_train = train_data.shape[0]
train_features = torch.tensor(all_features[:n_train].values,
                              dtype=torch.float32)
test_features = torch.tensor(all_features[n_train:].values,
                             dtype=torch.float32)
train_labels = torch.tensor(train_data.SalePrice.values.reshape(-1, 1),
                            dtype=torch.float32)

loss = nn.MSELoss()
in_features = train_features.shape[1]

def get_net():
    net = nn.Sequential(nn.Linear(in_features, 1))
    return net

def log_rmse(net, features, labels):
    # Clamp predictions below 1 so that log() stays finite.
    clipped_preds = torch.clamp(net(features), 1, float('inf'))
    rmse = torch.sqrt(loss(torch.log(clipped_preds), torch.log(labels)))
    return rmse.item()

def train(net, train_features, train_labels, test_features, test_labels,
          num_epochs, learning_rate, weight_decay, batch_size):
    train_ls, test_ls = [], []
    train_iter = d2l.load_array((train_features, train_labels), batch_size)
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate,
                                 weight_decay=weight_decay)
    for epoch in range(num_epochs):
        for X, y in train_iter:
            optimizer.zero_grad()
            l = loss(net(X), y)
            l.backward()
            optimizer.step()
        train_ls.append(log_rmse(net, train_features, train_labels))
        if test_labels is not None:
            test_ls.append(log_rmse(net, test_features, test_labels))
    return train_ls, test_ls

def get_k_fold_data(k, i, X, y):
    assert k > 1
    fold_size = X.shape[0] // k
    X_train, y_train = None, None
    for j in range(k):
        idx = slice(j * fold_size, (j + 1) * fold_size)
        X_part, y_part = X[idx, :], y[idx]
        if j == i:
            X_valid, y_valid = X_part, y_part
        elif X_train is None:
            X_train, y_train = X_part, y_part
        else:
            X_train = torch.cat([X_train, X_part], 0)
            y_train = torch.cat([y_train, y_part], 0)
    return X_train, y_train, X_valid, y_valid

def train_and_pred(train_features, test_features, train_labels, test_data,
                   num_epochs, lr, weight_decay, batch_size):
    net = get_net()
    train_ls, _ = train(net, train_features, train_labels, None, None,
                        num_epochs, lr, weight_decay, batch_size)
    d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch',
             ylabel='log rmse', xlim=[1, num_epochs], yscale='log')
    preds = net(test_features).detach().numpy()
    test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False)
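The listing defines `get_k_fold_data` but the flattened text omits the driver that averages over folds; a minimal sketch consistent with the functions above (my reconstruction, not verbatim from the source):

def k_fold(k, X_train, y_train, num_epochs, lr, weight_decay, batch_size):
    train_l_sum, valid_l_sum = 0, 0
    for i in range(k):
        data = get_k_fold_data(k, i, X_train, y_train)
        net = get_net()
        # `train` returns per-epoch log-rmse curves; keep the final values.
        train_ls, valid_ls = train(net, *data, num_epochs, lr,
                                   weight_decay, batch_size)
        train_l_sum += train_ls[-1]
        valid_l_sum += valid_ls[-1]
    return train_l_sum / k, valid_l_sum / k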
%matplotlib inline
import pandas as pd
from mxnet import autograd, gluon, init, np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()

# `train_data`, `test_data`, and `all_features` come from the pandas
# download/preprocessing step earlier in the source.
n_train = train_data.shape[0]
train_features = np.array(all_features[:n_train].values, dtype=np.float32)
test_features = np.array(all_features[n_train:].values, dtype=np.float32)
train_labels = np.array(train_data.SalePrice.values.reshape(-1, 1),
                        dtype=np.float32)

loss = gluon.loss.L2Loss()

def get_net():
    net = nn.Sequential()
    net.add(nn.Dense(1))
    net.initialize()
    return net

def log_rmse(net, features, labels):
    clipped_preds = np.clip(net(features), 1, float('inf'))
    return np.sqrt(2 * loss(np.log(clipped_preds), np.log(labels)).mean())

def train(net, train_features, train_labels, test_features, test_labels,
          num_epochs, learning_rate, weight_decay, batch_size):
    train_ls, test_ls = [], []
    train_iter = d2l.load_array((train_features, train_labels), batch_size)
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': learning_rate,
                             'wd': weight_decay})
    for epoch in range(num_epochs):
        for X, y in train_iter:
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            trainer.step(batch_size)
        train_ls.append(log_rmse(net, train_features, train_labels))
        if test_labels is not None:
            test_ls.append(log_rmse(net, test_features, test_labels))
    return train_ls, test_ls

def get_k_fold_data(k, i, X, y):
    assert k > 1
    fold_size = X.shape[0] // k
    X_train, y_train = None, None
    for j in range(k):
        idx = slice(j * fold_size, (j + 1) * fold_size)
        X_part, y_part = X[idx, :], y[idx]
        if j == i:
            X_valid, y_valid = X_part, y_part
        elif X_train is None:
            X_train, y_train = X_part, y_part
        else:
            X_train = np.concatenate([X_train, X_part], 0)
            y_train = np.concatenate([y_train, y_part], 0)
    return X_train, y_train, X_valid, y_valid

def train_and_pred(train_features, test_features, train_labels, test_data,
                   num_epochs, lr, weight_decay, batch_size):
    net = get_net()
    train_ls, _ = train(net, train_features, train_labels, None, None,
                        num_epochs, lr, weight_decay, batch_size)
    d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch',
             ylabel='log rmse', xlim=[1, num_epochs], yscale='log')
    preds = net(test_features).asnumpy()
    test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False)
import torch
from torch import nn
from torch.nn import functional as F

net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
X = torch.rand(2, 20)
net(X)

class MLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(20, 256)
        self.out = nn.Linear(256, 10)

    def forward(self, X):
        return self.out(F.relu(self.hidden(X)))

net = MLP()
net(X)

class MySequential(nn.Module):
    def __init__(self, *args):
        super().__init__()
        for idx, module in enumerate(args):
            self._modules[str(idx)] = module

    def forward(self, X):
        for block in self._modules.values():
            X = block(X)
        return X

net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
net(X)

class FixedHiddenMLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        self.linear = nn.Linear(20, 20)

    def forward(self, X):
        X = self.linear(X)
        X = F.relu(torch.mm(X, self.rand_weight) + 1)
        X = self.linear(X)  # the same layer applied twice: shared parameters
        while X.abs().sum() > 1:
            X /= 2
        return X.sum()

net = FixedHiddenMLP()
net(X)

class NestMLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),
                                 nn.Linear(64, 32), nn.ReLU())
        self.linear = nn.Linear(32, 16)

    def forward(self, X):
        return self.linear(self.net(X))

chimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())
chimera(X)
from mxnet import np, npx
from mxnet.gluon import nn

npx.set_np()

net = nn.Sequential()
net.add(nn.Dense(256, activation='relu'))
net.add(nn.Dense(10))
net.initialize()

X = np.random.uniform(size=(2, 20))
net(X)

class MLP(nn.Block):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.hidden = nn.Dense(256, activation='relu')
        self.out = nn.Dense(10)

    def forward(self, X):
        return self.out(self.hidden(X))

net = MLP()
net.initialize()
net(X)

class MySequential(nn.Block):
    def add(self, block):
        self._children[block.name] = block

    def forward(self, X):
        for block in self._children.values():
            X = block(X)
        return X

net = MySequential()
net.add(nn.Dense(256, activation='relu'))
net.add(nn.Dense(10))
net.initialize()
net(X)

class FixedHiddenMLP(nn.Block):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.rand_weight = self.params.get_constant(
            'rand_weight', np.random.uniform(size=(20, 20)))
        self.dense = nn.Dense(20, activation='relu')

    def forward(self, X):
        X = self.dense(X)
        X = npx.relu(np.dot(X, self.rand_weight.data()) + 1)
        X = self.dense(X)
        while np.abs(X).sum() > 1:
            X /= 2
        return X.sum()

net = FixedHiddenMLP()
net.initialize()
net(X)

class NestMLP(nn.Block):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.net = nn.Sequential()
        self.net.add(nn.Dense(64, activation='relu'),
                     nn.Dense(32, activation='relu'))
        self.dense = nn.Dense(16, activation='relu')

    def forward(self, X):
        return self.dense(self.net(X))

chimera = nn.Sequential()
chimera.add(NestMLP(), nn.Dense(20), FixedHiddenMLP())
chimera.initialize()
chimera(X)
import torch
from torch import nn

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
X = torch.rand(size=(2, 4))
net(X)

net.state_dict()['2.bias'].data

def block1():
    return nn.Sequential(nn.Linear(4, 8), nn.ReLU(),
                         nn.Linear(8, 4), nn.ReLU())

def block2():
    net = nn.Sequential()
    for i in range(4):
        net.add_module(f'block {i}', block1())
    return net

rgnet = nn.Sequential(block2(), nn.Linear(4, 1))
rgnet(X)

def init_normal(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, mean=0, std=0.01)
        nn.init.zeros_(m.bias)

net.apply(init_normal)
net[0].weight.data[0], net[0].bias.data[0]

def init_constant(m):
    if type(m) == nn.Linear:
        nn.init.constant_(m.weight, 1)
        nn.init.zeros_(m.bias)

net.apply(init_constant)
net[0].weight.data[0], net[0].bias.data[0]

def init_xavier(m):
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)

def init_42(m):
    if type(m) == nn.Linear:
        nn.init.constant_(m.weight, 42)

net[0].apply(init_xavier)
net[2].apply(init_42)

def my_init(m):
    if type(m) == nn.Linear:
        nn.init.uniform_(m.weight, -10, 10)
        m.weight.data *= m.weight.data.abs() >= 5  # zero out small weights

net.apply(my_init)
net[0].weight[:2]

net[0].weight.data[:] += 1
net[0].weight.data[0, 0] = 42
net[0].weight.data[0]

# `CenteredLayer` is defined in the next listing.
layer = CenteredLayer()
layer(torch.FloatTensor([1, 2, 3, 4, 5]))
net = nn.Sequential(nn.Linear(8, 128), CenteredLayer())
from mxnet import init, np, npx
from mxnet.gluon import nn

npx.set_np()

net = nn.Sequential()
net.add(nn.Dense(8, activation='relu'))
net.add(nn.Dense(1))
net.initialize()

X = np.random.uniform(size=(2, 4))
net(X)

net.collect_params()['dense1_bias'].data()

def block1():
    net = nn.Sequential()
    net.add(nn.Dense(32, activation='relu'))
    net.add(nn.Dense(16, activation='relu'))
    return net

def block2():
    net = nn.Sequential()
    for _ in range(4):
        net.add(block1())
    return net

rgnet = nn.Sequential()
rgnet.add(block2())
rgnet.add(nn.Dense(10))
rgnet.initialize()
rgnet(X)

net.initialize(init=init.Normal(sigma=0.01), force_reinit=True)
net[0].weight.data()[0]

net.initialize(init=init.Constant(1), force_reinit=True)
net[0].weight.data()[0]

net[0].weight.initialize(init=init.Xavier(), force_reinit=True)
net[1].initialize(init=init.Constant(42), force_reinit=True)

class MyInit(init.Initializer):
    def _init_weight(self, name, data):
        data[:] = np.random.uniform(-10, 10, data.shape)
        data *= np.abs(data) >= 5  # zero out small weights

net.initialize(MyInit(), force_reinit=True)
net[0].weight.data()[:2]

net[0].weight.data()[:] += 1
net[0].weight.data()[0, 0] = 42
net[0].weight.data()[0]

# `CenteredLayer` is defined in the next listing.
layer = CenteredLayer()
layer(np.array([1, 2, 3, 4, 5]))
net = nn.Sequential()
net.add(nn.Dense(128), CenteredLayer())
net.initialize()
import torch
import torch.nn.functional as F
from torch import nn

class CenteredLayer(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, X):
        return X - X.mean()

# `net` is the Sequential ending in CenteredLayer from the previous listing.
Y = net(torch.rand(4, 8))
Y.mean()

class MyLinear(nn.Module):
    def __init__(self, in_units, units):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(in_units, units))
        self.bias = nn.Parameter(torch.randn(units,))

    def forward(self, X):
        linear = torch.matmul(X, self.weight.data) + self.bias.data
        return F.relu(linear)

linear = MyLinear(5, 3)
linear.weight
linear(torch.rand(2, 5))

net = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))
net(torch.rand(2, 64))
from mxnet import np, npx
from mxnet.gluon import nn

npx.set_np()

class CenteredLayer(nn.Block):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def forward(self, X):
        return X - X.mean()

# `net` is the Sequential ending in CenteredLayer from the previous listing.
Y = net(np.random.uniform(size=(4, 8)))
Y.mean()

class MyDense(nn.Block):
    def __init__(self, units, in_units, **kwargs):
        super().__init__(**kwargs)
        self.weight = self.params.get('weight', shape=(in_units, units))
        self.bias = self.params.get('bias', shape=(units,))

    def forward(self, x):
        linear = np.dot(x, self.weight.data(ctx=x.ctx)) + self.bias.data(
            ctx=x.ctx)
        return npx.relu(linear)

dense = MyDense(units=3, in_units=5)
dense.params
dense.initialize()
dense(np.random.uniform(size=(2, 5)))

net = nn.Sequential()
net.add(MyDense(8, in_units=64), MyDense(1, in_units=8))
net.initialize()
net(np.random.uniform(size=(2, 64)))
import torch
from torch import nn
from torch.nn import functional as F

x = torch.arange(4)
torch.save(x, 'x-file')
x2 = torch.load('x-file')

y = torch.zeros(4)
torch.save([x, y], 'x-files')
x2, y2 = torch.load('x-files')

mydict = {'x': x, 'y': y}
torch.save(mydict, 'mydict')
mydict2 = torch.load('mydict')

class MLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(20, 256)
        self.output = nn.Linear(256, 10)

    def forward(self, x):
        return self.output(F.relu(self.hidden(x)))

net = MLP()
X = torch.randn(size=(2, 20))
Y = net(X)

torch.save(net.state_dict(), 'mlp.params')

clone = MLP()
clone.load_state_dict(torch.load('mlp.params'))
clone.eval()
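A short follow-up check (my addition; the source notebooks verify the restore in a similar way):

Y_clone = clone(X)
print((Y_clone == Y).all())  # tensor(True): same parameters, same output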
from mxnet import np, npx
from mxnet.gluon import nn

npx.set_np()

x = np.arange(4)
npx.save('x-file', x)
x2 = npx.load('x-file')

y = np.zeros(4)
npx.save('x-files', [x, y])
x2, y2 = npx.load('x-files')

mydict = {'x': x, 'y': y}
npx.save('mydict', mydict)
mydict2 = npx.load('mydict')

class MLP(nn.Block):
    def __init__(self, **kwargs):
        super(MLP, self).__init__(**kwargs)
        self.hidden = nn.Dense(256, activation='relu')
        self.output = nn.Dense(10)

    def forward(self, x):
        return self.output(self.hidden(x))

net = MLP()
net.initialize()
X = np.random.uniform(size=(2, 20))
Y = net(X)

net.save_parameters('mlp.params')

clone = MLP()
clone.load_parameters('mlp.params')
import torch
from torch import nn

torch.device('cpu'), torch.device('cuda'), torch.device('cuda:1')
torch.cuda.device_count()

def try_gpu(i=0):
    """Return gpu(i) if it exists, otherwise the CPU."""
    if torch.cuda.device_count() >= i + 1:
        return torch.device(f'cuda:{i}')
    return torch.device('cpu')

def try_all_gpus():
    """Return all available GPUs, or [cpu()] if there is no GPU."""
    devices = [torch.device(f'cuda:{i}')
               for i in range(torch.cuda.device_count())]
    return devices if devices else [torch.device('cpu')]

try_gpu(), try_gpu(10), try_all_gpus()

x = torch.tensor([1, 2, 3])
x.device

X = torch.ones(2, 3, device=try_gpu())
Y = torch.rand(2, 3, device=try_gpu(1))  # assumes a second GPU is present
Z = X.cuda(1)
Z.cuda(1) is Z  # True: copying to the device a tensor already lives on is a no-op

net = nn.Sequential(nn.Linear(3, 1))
net = net.to(device=try_gpu())
net[0].weight.data.device
from mxnet import np, npx
from mxnet.gluon import nn

npx.set_np()

npx.cpu(), npx.gpu(), npx.gpu(1)
npx.num_gpus()

def try_gpu(i=0):
    return npx.gpu(i) if npx.num_gpus() >= i + 1 else npx.cpu()

def try_all_gpus():
    devices = [npx.gpu(i) for i in range(npx.num_gpus())]
    return devices if devices else [npx.cpu()]

try_gpu(), try_gpu(10), try_all_gpus()

x = np.array([1, 2, 3])
x.ctx

X = np.ones((2, 3), ctx=try_gpu())
Y = np.random.uniform(size=(2, 3), ctx=try_gpu(1))
Z = X.copyto(try_gpu(1))
Z.as_in_ctx(try_gpu(1)) is Z

net = nn.Sequential()
net.add(nn.Dense(1))
net.initialize(ctx=try_gpu())
net[0].weight.data().ctx
import torch
from torch import nn
from d2l import torch as d2l

def corr2d(X, K):
    """Compute 2D cross-correlation."""
    h, w = K.shape
    Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
    return Y

X = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
K = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
corr2d(X, K)

class Conv2D(nn.Module):
    def __init__(self, kernel_size):
        super().__init__()
        self.weight = nn.Parameter(torch.rand(kernel_size))
        self.bias = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        return corr2d(x, self.weight) + self.bias

X = torch.ones((6, 8))
X[:, 2:6] = 0
K = torch.tensor([[1.0, -1.0]])
Y = corr2d(X, K)  # the flattened listing dropped this assignment; Y is the target below
corr2d(X.t(), K)

conv2d = nn.Conv2d(1, 1, kernel_size=(1, 2), bias=False)
X = X.reshape((1, 1, 6, 8))
Y = Y.reshape((1, 1, 6, 7))
lr = 3e-2

for i in range(10):
    Y_hat = conv2d(X)
    l = (Y_hat - Y) ** 2
    conv2d.zero_grad()
    l.sum().backward()
    conv2d.weight.data[:] -= lr * conv2d.weight.grad

conv2d.weight.data.reshape((1, 2))
from mxnet import autograd, np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()

def corr2d(X, K):
    h, w = K.shape
    Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
    return Y

X = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
K = np.array([[0.0, 1.0], [2.0, 3.0]])
corr2d(X, K)

class Conv2D(nn.Block):
    def __init__(self, kernel_size, **kwargs):
        super().__init__(**kwargs)
        self.weight = self.params.get('weight', shape=kernel_size)
        self.bias = self.params.get('bias', shape=(1,))

    def forward(self, x):
        return corr2d(x, self.weight.data()) + self.bias.data()

X = np.ones((6, 8))
X[:, 2:6] = 0
K = np.array([[1.0, -1.0]])
Y = corr2d(X, K)  # the flattened listing dropped this assignment; Y is the target below
corr2d(d2l.transpose(X), K)

conv2d = nn.Conv2D(1, kernel_size=(1, 2), use_bias=False)
conv2d.initialize()

X = X.reshape(1, 1, 6, 8)
Y = Y.reshape(1, 1, 6, 7)
lr = 3e-2

for i in range(10):
    with autograd.record():
        Y_hat = conv2d(X)
        l = (Y_hat - Y) ** 2
    l.backward()
    conv2d.weight.data()[:] -= lr * conv2d.weight.grad()

conv2d.weight.data().reshape((1, 2))
import torch
from torch import nn

def comp_conv2d(conv2d, X):
    X = X.reshape((1, 1) + X.shape)  # add batch and channel dimensions
    Y = conv2d(X)
    return Y.reshape(Y.shape[2:])    # strip them off again

conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1)
X = torch.rand(size=(8, 8))
comp_conv2d(conv2d, X).shape

conv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1))
comp_conv2d(conv2d, X).shape

conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2)
comp_conv2d(conv2d, X).shape

conv2d = nn.Conv2d(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))
comp_conv2d(conv2d, X).shape
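All four shapes above follow the usual convolution arithmetic: each output dimension is floor((n + 2p - k) / s) + 1. A minimal check against nn.Conv2d (my addition):

import torch
from torch import nn

n, k, p, s = 8, 3, 1, 2
conv2d = nn.Conv2d(1, 1, kernel_size=k, padding=p, stride=s)
X = torch.rand(1, 1, n, n)
print(conv2d(X).shape[-1], (n + 2 * p - k) // s + 1)  # 4 4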
from mxnet import np, npx
from mxnet.gluon import nn

npx.set_np()

def comp_conv2d(conv2d, X):
    conv2d.initialize()
    X = X.reshape((1, 1) + X.shape)  # add batch and channel dimensions
    Y = conv2d(X)
    return Y.reshape(Y.shape[2:])    # strip them off again

conv2d = nn.Conv2D(1, kernel_size=3, padding=1)
X = np.random.uniform(size=(8, 8))
comp_conv2d(conv2d, X).shape

conv2d = nn.Conv2D(1, kernel_size=(5, 3), padding=(2, 1))
comp_conv2d(conv2d, X).shape

conv2d = nn.Conv2D(1, kernel_size=3, padding=1, strides=2)
comp_conv2d(conv2d, X).shape

conv2d = nn.Conv2D(1, kernel_size=(3, 5), padding=(0, 1), strides=(3, 4))
comp_conv2d(conv2d, X).shape
import torch
from d2l import torch as d2l

def corr2d_multi_in(X, K):
    # Sum the per-channel cross-correlations over the input channels.
    return sum(d2l.corr2d(x, k) for x, k in zip(X, K))

X = torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]],
                  [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])
K = torch.tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])
corr2d_multi_in(X, K)

def corr2d_multi_in_out(X, K):
    # Stack the results of each output-channel kernel.
    return torch.stack([corr2d_multi_in(X, k) for k in K], 0)

K = torch.stack((K, K + 1, K + 2), 0)
K.shape

def corr2d_multi_in_out_1x1(X, K):
    c_i, h, w = X.shape
    c_o = K.shape[0]
    X = X.reshape((c_i, h * w))
    K = K.reshape((c_o, c_i))
    Y = torch.matmul(K, X)  # a 1x1 convolution is a matmul over channels
    return Y.reshape((c_o, h, w))

X = torch.normal(0, 1, (3, 3, 3))
K = torch.normal(0, 1, (2, 3, 1, 1))
Y1 = corr2d_multi_in_out_1x1(X, K)
Y2 = corr2d_multi_in_out(X, K)
assert float(torch.abs(Y1 - Y2).sum()) < 1e-6
from mxnet import np, npx
from d2l import mxnet as d2l

npx.set_np()

def corr2d_multi_in(X, K):
    return sum(d2l.corr2d(x, k) for x, k in zip(X, K))

X = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]],
              [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])
K = np.array([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])
corr2d_multi_in(X, K)

def corr2d_multi_in_out(X, K):
    return np.stack([corr2d_multi_in(X, k) for k in K], 0)

K = np.stack((K, K + 1, K + 2), 0)
K.shape

def corr2d_multi_in_out_1x1(X, K):
    c_i, h, w = X.shape
    c_o = K.shape[0]
    X = X.reshape((c_i, h * w))
    K = K.reshape((c_o, c_i))
    Y = np.dot(K, X)
    return Y.reshape((c_o, h, w))

X = np.random.normal(0, 1, (3, 3, 3))
K = np.random.normal(0, 1, (2, 3, 1, 1))
Y1 = corr2d_multi_in_out_1x1(X, K)
Y2 = corr2d_multi_in_out(X, K)
assert float(np.abs(Y1 - Y2).sum()) < 1e-6
import torch
from torch import nn
from d2l import torch as d2l

def pool2d(X, pool_size, mode='max'):
    p_h, p_w = pool_size
    Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            if mode == 'max':
                Y[i, j] = X[i: i + p_h, j: j + p_w].max()
            elif mode == 'avg':
                Y[i, j] = X[i: i + p_h, j: j + p_w].mean()
    return Y

X = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
pool2d(X, (2, 2))

X = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4))
pool2d = nn.MaxPool2d(3)  # by default, the stride equals the window size
pool2d(X)

pool2d = nn.MaxPool2d(3, padding=1, stride=2)
pool2d(X)

pool2d = nn.MaxPool2d((2, 3), stride=(2, 3), padding=(0, 1))
pool2d(X)

X = torch.cat((X, X + 1), 1)  # pooling acts on each channel separately
pool2d = nn.MaxPool2d(3, padding=1, stride=2)
pool2d(X)
from mxnet import np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()

def pool2d(X, pool_size, mode='max'):
    p_h, p_w = pool_size
    Y = np.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            if mode == 'max':
                Y[i, j] = X[i: i + p_h, j: j + p_w].max()
            elif mode == 'avg':
                Y[i, j] = X[i: i + p_h, j: j + p_w].mean()
    return Y

X = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
pool2d(X, (2, 2))

X = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4))
pool2d = nn.MaxPool2D(3)
pool2d(X)

pool2d = nn.MaxPool2D(3, padding=1, strides=2)
pool2d(X)

pool2d = nn.MaxPool2D((2, 3), padding=(0, 1), strides=(2, 3))
pool2d(X)

X = np.concatenate((X, X + 1), 1)
pool2d = nn.MaxPool2D(3, padding=1, strides=2)
pool2d(X)
import torch
from torch import nn
from d2l import torch as d2l

net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Flatten(),
    nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),
    nn.Linear(120, 84), nn.Sigmoid(),
    nn.Linear(84, 10))

X = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)

# `evaluate_accuracy_gpu` is not defined in this listing; see the sketch below.
def train_ch6(net, train_iter, test_iter, num_epochs, lr, device):
    def init_weights(m):
        if type(m) == nn.Linear or type(m) == nn.Conv2d:
            nn.init.xavier_uniform_(m.weight)
    net.apply(init_weights)
    net.to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    loss = nn.CrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
                            legend=['train loss', 'train acc', 'test acc'])
    timer, num_batches = d2l.Timer(), len(train_iter)
    for epoch in range(num_epochs):
        metric = d2l.Accumulator(3)
        net.train()
        for i, (X, y) in enumerate(train_iter):
            timer.start()
            optimizer.zero_grad()
            X, y = X.to(device), y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)
            l.backward()
            optimizer.step()
            with torch.no_grad():
                metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])
            timer.stop()
            train_l = metric[0] / metric[2]
            train_acc = metric[1] / metric[2]
            if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (train_l, train_acc, None))
        test_acc = evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch + 1, (None, None, test_acc))
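`evaluate_accuracy_gpu`, used by `train_ch6` above, is not shown in this listing; a minimal sketch consistent with its call site (d2l ships an equivalent helper):

import torch
from d2l import torch as d2l

def evaluate_accuracy_gpu(net, data_iter, device=None):
    """Compute accuracy on a dataset, copying each batch to `device`."""
    if isinstance(net, torch.nn.Module):
        net.eval()  # switch off dropout/batch-norm updates
        if not device:
            device = next(iter(net.parameters())).device
    metric = d2l.Accumulator(2)  # number correct, number of examples
    with torch.no_grad():
        for X, y in data_iter:
            X, y = X.to(device), y.to(device)
            metric.add(d2l.accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]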
from mxnet import autograd, gluon, init, np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()

net = nn.Sequential()
net.add(nn.Conv2D(channels=6, kernel_size=5, padding=2, activation='sigmoid'),
        nn.AvgPool2D(pool_size=2, strides=2),
        nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'),
        nn.AvgPool2D(pool_size=2, strides=2),
        nn.Dense(120, activation='sigmoid'),
        nn.Dense(84, activation='sigmoid'),
        nn.Dense(10))

X = np.random.uniform(size=(1, 1, 28, 28))
net.initialize()
for layer in net:
    X = layer(X)
    print(layer.name, 'output shape: ', X.shape)

# `evaluate_accuracy_gpu` comes from d2l (the MXNet analog of the sketch above).
def train_ch6(net, train_iter, test_iter, num_epochs, lr, device):
    net.initialize(force_reinit=True, ctx=device, init=init.Xavier())
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': lr})
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
                            legend=['train loss', 'train acc', 'test acc'])
    timer, num_batches = d2l.Timer(), len(train_iter)
    for epoch in range(num_epochs):
        metric = d2l.Accumulator(3)
        for i, (X, y) in enumerate(train_iter):
            timer.start()
            X, y = X.as_in_ctx(device), y.as_in_ctx(device)
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y)
            l.backward()
            trainer.step(X.shape[0])
            metric.add(l.sum(), d2l.accuracy(y_hat, y), X.shape[0])
            timer.stop()
            train_l = metric[0] / metric[2]
            train_acc = metric[1] / metric[2]
            if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (train_l, train_acc, None))
        test_acc = evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch + 1, (None, None, test_acc))
import torch
from torch import nn
from d2l import torch as d2l

net = nn.Sequential(
    nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),
    nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),
    nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),
    nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),
    nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),
    nn.Flatten(),
    nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),
    nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),
    nn.Linear(4096, 10))

X = torch.randn(1, 1, 224, 224)
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)
from mxnet import np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()

net = nn.Sequential()
net.add(nn.Conv2D(96, kernel_size=11, strides=4, activation='relu'),
        nn.MaxPool2D(pool_size=3, strides=2),
        nn.Conv2D(256, kernel_size=5, padding=2, activation='relu'),
        nn.MaxPool2D(pool_size=3, strides=2),
        nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),
        nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),
        nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'),
        nn.MaxPool2D(pool_size=3, strides=2),
        nn.Dense(4096, activation='relu'), nn.Dropout(0.5),
        nn.Dense(4096, activation='relu'), nn.Dropout(0.5),
        nn.Dense(10))

X = np.random.uniform(size=(1, 1, 224, 224))
net.initialize()
for layer in net:
    X = layer(X)
    print(layer.name, 'output shape: ', X.shape)
import torch
from torch import nn
from d2l import torch as d2l

def vgg_block(num_convs, in_channels, out_channels):
    layers = []
    for _ in range(num_convs):
        layers.append(nn.Conv2d(in_channels, out_channels,
                                kernel_size=3, padding=1))
        layers.append(nn.ReLU())
        in_channels = out_channels
    layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
    return nn.Sequential(*layers)

def vgg(conv_arch):
    conv_blks = []
    in_channels = 1
    for (num_convs, out_channels) in conv_arch:
        conv_blks.append(vgg_block(num_convs, in_channels, out_channels))
        in_channels = out_channels
    return nn.Sequential(
        *conv_blks, nn.Flatten(),
        nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5),
        nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),
        nn.Linear(4096, 10))

# `conv_arch` is not defined in this listing; see the note below.
net = vgg(conv_arch)

X = torch.randn(size=(1, 1, 224, 224))
for blk in net:
    X = blk(X)
    print(blk.__class__.__name__, 'output shape: ', X.shape)
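`conv_arch` is referenced by both VGG listings but the flattened text dropped its definition; the VGG-11 configuration used in the source is a tuple of (number of convolutions, output channels) pairs, one per block:

conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))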
from mxnet import np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()

def vgg_block(num_convs, num_channels):
    blk = nn.Sequential()
    for _ in range(num_convs):
        blk.add(nn.Conv2D(num_channels, kernel_size=3,
                          padding=1, activation='relu'))
    blk.add(nn.MaxPool2D(pool_size=2, strides=2))
    return blk

def vgg(conv_arch):
    net = nn.Sequential()
    for (num_convs, num_channels) in conv_arch:
        net.add(vgg_block(num_convs, num_channels))
    net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5),
            nn.Dense(4096, activation='relu'), nn.Dropout(0.5),
            nn.Dense(10))
    return net

# `conv_arch` is the configuration noted after the PyTorch listing above.
net = vgg(conv_arch)
net.initialize()

X = np.random.uniform(size=(1, 1, 224, 224))
for blk in net:
    X = blk(X)
    print(blk.name, 'output shape: ', X.shape)
import torch
from torch import nn
from d2l import torch as d2l

def nin_block(in_channels, out_channels, kernel_size, strides, padding):
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),
        nn.ReLU(),
        nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(),
        nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU())

net = nn.Sequential(
    nin_block(1, 96, kernel_size=11, strides=4, padding=0),
    nn.MaxPool2d(3, stride=2),
    nin_block(96, 256, kernel_size=5, strides=1, padding=2),
    nn.MaxPool2d(3, stride=2),
    nin_block(256, 384, kernel_size=3, strides=1, padding=1),
    nn.MaxPool2d(3, stride=2),
    nn.Dropout(0.5),
    nin_block(384, 10, kernel_size=3, strides=1, padding=1),
    nn.AdaptiveAvgPool2d((1, 1)),
    nn.Flatten())

X = torch.rand(size=(1, 1, 224, 224))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)
from mxnet import np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()

def nin_block(num_channels, kernel_size, strides, padding):
    blk = nn.Sequential()
    blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding,
                      activation='relu'),
            nn.Conv2D(num_channels, kernel_size=1, activation='relu'),
            nn.Conv2D(num_channels, kernel_size=1, activation='relu'))
    return blk

net = nn.Sequential()
net.add(nin_block(96, kernel_size=11, strides=4, padding=0),
        nn.MaxPool2D(pool_size=3, strides=2),
        nin_block(256, kernel_size=5, strides=1, padding=2),
        nn.MaxPool2D(pool_size=3, strides=2),
        nin_block(384, kernel_size=3, strides=1, padding=1),
        nn.MaxPool2D(pool_size=3, strides=2),
        nn.Dropout(0.5),
        nin_block(10, kernel_size=3, strides=1, padding=1),
        nn.GlobalAvgPool2D(),
        nn.Flatten())

X = np.random.uniform(size=(1, 1, 224, 224))
net.initialize()
for layer in net:
    X = layer(X)
    print(layer.name, 'output shape: ', X.shape)
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l

class Inception(nn.Module):
    # c1--c4 are the numbers of output channels of the four branches
    def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):
        super(Inception, self).__init__(**kwargs)
        self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)
        self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)
        self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)
        self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)
        self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)
        self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)

    def forward(self, x):
        p1 = F.relu(self.p1_1(x))
        p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))
        p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))
        p4 = F.relu(self.p4_2(self.p4_1(x)))
        return torch.cat((p1, p2, p3, p4), dim=1)  # concatenate on channels

b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
                   nn.ReLU(),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

b2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1), nn.ReLU(),
                   nn.Conv2d(64, 192, kernel_size=3, padding=1), nn.ReLU(),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),
                   Inception(256, 128, (128, 192), (32, 96), 64),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),
                   Inception(512, 160, (112, 224), (24, 64), 64),
                   Inception(512, 128, (128, 256), (24, 64), 64),
                   Inception(512, 112, (144, 288), (32, 64), 64),
                   Inception(528, 256, (160, 320), (32, 128), 128),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),
                   Inception(832, 384, (192, 384), (48, 128), 128),
                   nn.AdaptiveAvgPool2d((1, 1)),
                   nn.Flatten())

net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))

X = torch.rand(size=(1, 1, 96, 96))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)
from mxnet import np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()

class Inception(nn.Block):
    def __init__(self, c1, c2, c3, c4, **kwargs):
        super(Inception, self).__init__(**kwargs)
        self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu')
        self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu')
        self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1,
                              activation='relu')
        self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu')
        self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2,
                              activation='relu')
        self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1)
        self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu')

    def forward(self, x):
        p1 = self.p1_1(x)
        p2 = self.p2_2(self.p2_1(x))
        p3 = self.p3_2(self.p3_1(x))
        p4 = self.p4_2(self.p4_1(x))
        return np.concatenate((p1, p2, p3, p4), axis=1)

b1 = nn.Sequential()
b1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'),
       nn.MaxPool2D(pool_size=3, strides=2, padding=1))

b2 = nn.Sequential()
b2.add(nn.Conv2D(64, kernel_size=1, activation='relu'),
       nn.Conv2D(192, kernel_size=3, padding=1, activation='relu'),
       nn.MaxPool2D(pool_size=3, strides=2, padding=1))

b3 = nn.Sequential()
b3.add(Inception(64, (96, 128), (16, 32), 32),
       Inception(128, (128, 192), (32, 96), 64),
       nn.MaxPool2D(pool_size=3, strides=2, padding=1))

b4 = nn.Sequential()
b4.add(Inception(192, (96, 208), (16, 48), 64),
       Inception(160, (112, 224), (24, 64), 64),
       Inception(128, (128, 256), (24, 64), 64),
       Inception(112, (144, 288), (32, 64), 64),
       Inception(256, (160, 320), (32, 128), 128),
       nn.MaxPool2D(pool_size=3, strides=2, padding=1))

b5 = nn.Sequential()
b5.add(Inception(256, (160, 320), (32, 128), 128),
       Inception(384, (192, 384), (48, 128), 128),
       nn.GlobalAvgPool2D())

net = nn.Sequential()
net.add(b1, b2, b3, b4, b5, nn.Dense(10))

X = np.random.uniform(size=(1, 1, 96, 96))
net.initialize()
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)
import torch
from torch import nn
from d2l import torch as d2l

def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):
    if not torch.is_grad_enabled():
        # Prediction mode: normalize with the running statistics.
        X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)
    else:
        assert len(X.shape) in (2, 4)
        if len(X.shape) == 2:
            mean = X.mean(dim=0)
            var = ((X - mean) ** 2).mean(dim=0)
        else:
            mean = X.mean(dim=(0, 2, 3), keepdim=True)
            var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True)
        X_hat = (X - mean) / torch.sqrt(var + eps)
        moving_mean = momentum * moving_mean + (1.0 - momentum) * mean
        moving_var = momentum * moving_var + (1.0 - momentum) * var
    Y = gamma * X_hat + beta
    return Y, moving_mean.data, moving_var.data

class BatchNorm(nn.Module):
    def __init__(self, num_features, num_dims):
        super().__init__()
        if num_dims == 2:
            shape = (1, num_features)
        else:
            shape = (1, num_features, 1, 1)
        self.gamma = nn.Parameter(torch.ones(shape))
        self.beta = nn.Parameter(torch.zeros(shape))
        self.moving_mean = torch.zeros(shape)
        self.moving_var = torch.ones(shape)

    def forward(self, X):
        if self.moving_mean.device != X.device:
            self.moving_mean = self.moving_mean.to(X.device)
            self.moving_var = self.moving_var.to(X.device)
        Y, self.moving_mean, self.moving_var = batch_norm(
            X, self.gamma, self.beta, self.moving_mean, self.moving_var,
            eps=1e-5, momentum=0.9)
        return Y

net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Flatten(),
    nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),
    nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),
    nn.Linear(84, 10))

net[1].gamma.reshape((-1,)), net[1].beta.reshape((-1,))

net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Flatten(),
    nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(),
    nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(),
    nn.Linear(84, 10))
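Note that `momentum=0.9` above weights the *old* moving average, while `nn.BatchNorm2d`'s `momentum` weights the *new* batch statistic, so the equivalent built-in setting is `momentum=0.1` (its default). A minimal check (my addition):

import torch
from torch import nn

X = torch.randn(8, 6, 4, 4)
bn = nn.BatchNorm2d(6, momentum=0.1)
bn.train()
bn(X)
# One update with the listing's convention: old * 0.9 + batch mean * 0.1.
manual = 0.9 * torch.zeros(6) + 0.1 * X.mean(dim=(0, 2, 3))
print(torch.allclose(bn.running_mean, manual, atol=1e-6))  # True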
from mxnet import autograd, init, np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()

def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):
    if not autograd.is_training():
        X_hat = (X - moving_mean) / np.sqrt(moving_var + eps)
    else:
        assert len(X.shape) in (2, 4)
        if len(X.shape) == 2:
            mean = X.mean(axis=0)
            var = ((X - mean) ** 2).mean(axis=0)
        else:
            mean = X.mean(axis=(0, 2, 3), keepdims=True)
            var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)
        X_hat = (X - mean) / np.sqrt(var + eps)
        moving_mean = momentum * moving_mean + (1.0 - momentum) * mean
        moving_var = momentum * moving_var + (1.0 - momentum) * var
    Y = gamma * X_hat + beta
    return Y, moving_mean, moving_var

class BatchNorm(nn.Block):
    def __init__(self, num_features, num_dims, **kwargs):
        super().__init__(**kwargs)
        if num_dims == 2:
            shape = (1, num_features)
        else:
            shape = (1, num_features, 1, 1)
        self.gamma = self.params.get('gamma', shape=shape, init=init.One())
        self.beta = self.params.get('beta', shape=shape, init=init.Zero())
        self.moving_mean = np.zeros(shape)
        self.moving_var = np.ones(shape)

    def forward(self, X):
        if self.moving_mean.ctx != X.ctx:
            self.moving_mean = self.moving_mean.copyto(X.ctx)
            self.moving_var = self.moving_var.copyto(X.ctx)
        Y, self.moving_mean, self.moving_var = batch_norm(
            X, self.gamma.data(), self.beta.data(), self.moving_mean,
            self.moving_var, eps=1e-12, momentum=0.9)
        return Y

net = nn.Sequential()
net.add(nn.Conv2D(6, kernel_size=5), BatchNorm(6, num_dims=4),
        nn.Activation('sigmoid'),
        nn.AvgPool2D(pool_size=2, strides=2),
        nn.Conv2D(16, kernel_size=5), BatchNorm(16, num_dims=4),
        nn.Activation('sigmoid'),
        nn.AvgPool2D(pool_size=2, strides=2),
        nn.Dense(120), BatchNorm(120, num_dims=2), nn.Activation('sigmoid'),
        nn.Dense(84), BatchNorm(84, num_dims=2), nn.Activation('sigmoid'),
        nn.Dense(10))

net[1].gamma.data().reshape(-1,), net[1].beta.data().reshape(-1,)

net = nn.Sequential()
net.add(nn.Conv2D(6, kernel_size=5), nn.BatchNorm(),
        nn.Activation('sigmoid'),
        nn.AvgPool2D(pool_size=2, strides=2),
        nn.Conv2D(16, kernel_size=5), nn.BatchNorm(),
        nn.Activation('sigmoid'),
        nn.AvgPool2D(pool_size=2, strides=2),
        nn.Dense(120), nn.BatchNorm(), nn.Activation('sigmoid'),
        nn.Dense(84), nn.BatchNorm(), nn.Activation('sigmoid'),
        nn.Dense(10))
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l

class Residual(nn.Module):
    def __init__(self, input_channels, num_channels,
                 use_1x1conv=False, strides=1):
        super().__init__()
        self.conv1 = nn.Conv2d(input_channels, num_channels,
                               kernel_size=3, padding=1, stride=strides)
        self.conv2 = nn.Conv2d(num_channels, num_channels,
                               kernel_size=3, padding=1)
        if use_1x1conv:
            self.conv3 = nn.Conv2d(input_channels, num_channels,
                                   kernel_size=1, stride=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.bn2 = nn.BatchNorm2d(num_channels)

    def forward(self, X):
        Y = F.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)  # match shapes so the skip connection adds up
        Y += X
        return F.relu(Y)

blk = Residual(3, 3)
X = torch.rand(4, 3, 6, 6)
Y = blk(X)
Y.shape

blk = Residual(3, 6, use_1x1conv=True, strides=2)
blk(X).shape

b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
                   nn.BatchNorm2d(64), nn.ReLU(),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

def resnet_block(input_channels, num_channels, num_residuals,
                 first_block=False):
    blk = []
    for i in range(num_residuals):
        if i == 0 and not first_block:
            blk.append(Residual(input_channels, num_channels,
                                use_1x1conv=True, strides=2))
        else:
            blk.append(Residual(num_channels, num_channels))
    return blk

b2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))
b3 = nn.Sequential(*resnet_block(64, 128, 2))
b4 = nn.Sequential(*resnet_block(128, 256, 2))
b5 = nn.Sequential(*resnet_block(256, 512, 2))

net = nn.Sequential(b1, b2, b3, b4, b5,
                    nn.AdaptiveAvgPool2d((1, 1)),
                    nn.Flatten(), nn.Linear(512, 10))

X = torch.rand(size=(1, 1, 224, 224))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)
from mxnet import np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()

class Residual(nn.Block):
    def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):
        super().__init__(**kwargs)
        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1,
                               strides=strides)
        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
        if use_1x1conv:
            self.conv3 = nn.Conv2D(num_channels, kernel_size=1,
                                   strides=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm()
        self.bn2 = nn.BatchNorm()

    def forward(self, X):
        Y = npx.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        return npx.relu(Y + X)

blk = Residual(3)
blk.initialize()
X = np.random.uniform(size=(4, 3, 6, 6))
blk(X).shape

blk = Residual(6, use_1x1conv=True, strides=2)
blk.initialize()
blk(X).shape

net = nn.Sequential()
net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),
        nn.BatchNorm(), nn.Activation('relu'),
        nn.MaxPool2D(pool_size=3, strides=2, padding=1))

def resnet_block(num_channels, num_residuals, first_block=False):
    blk = nn.Sequential()
    for i in range(num_residuals):
        if i == 0 and not first_block:
            blk.add(Residual(num_channels, use_1x1conv=True, strides=2))
        else:
            blk.add(Residual(num_channels))
    return blk

net.add(resnet_block(64, 2, first_block=True),
        resnet_block(128, 2),
        resnet_block(256, 2),
        resnet_block(512, 2))
net.add(nn.GlobalAvgPool2D(), nn.Dense(10))

X = np.random.uniform(size=(1, 1, 224, 224))
net.initialize()
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)
import torch
from torch import nn
from d2l import torch as d2l

def conv_block(input_channels, num_channels):
    return nn.Sequential(
        nn.BatchNorm2d(input_channels), nn.ReLU(),
        nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1))

class DenseBlock(nn.Module):
    def __init__(self, num_convs, input_channels, num_channels):
        super(DenseBlock, self).__init__()
        layer = []
        for i in range(num_convs):
            layer.append(conv_block(num_channels * i + input_channels,
                                    num_channels))
        self.net = nn.Sequential(*layer)

    def forward(self, X):
        for blk in self.net:
            Y = blk(X)
            X = torch.cat((X, Y), dim=1)  # concatenate input and output
        return X

blk = DenseBlock(2, 3, 10)
X = torch.randn(4, 3, 8, 8)
Y = blk(X)
Y.shape

def transition_block(input_channels, num_channels):
    return nn.Sequential(
        nn.BatchNorm2d(input_channels), nn.ReLU(),
        nn.Conv2d(input_channels, num_channels, kernel_size=1),
        nn.AvgPool2d(kernel_size=2, stride=2))

blk = transition_block(23, 10)
blk(Y).shape

b1 = nn.Sequential(
    nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
    nn.BatchNorm2d(64), nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

num_channels, growth_rate = 64, 32
num_convs_in_dense_blocks = [4, 4, 4, 4]
blks = []
for i, num_convs in enumerate(num_convs_in_dense_blocks):
    blks.append(DenseBlock(num_convs, num_channels, growth_rate))
    num_channels += num_convs * growth_rate
    if i != len(num_convs_in_dense_blocks) - 1:
        blks.append(transition_block(num_channels, num_channels // 2))
        num_channels = num_channels // 2

net = nn.Sequential(
    b1, *blks,
    nn.BatchNorm2d(num_channels), nn.ReLU(),
    nn.AdaptiveAvgPool2d((1, 1)),
    nn.Flatten(),
    nn.Linear(num_channels, 10))
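The channel bookkeeping in the loop above can be traced by hand (my addition): each dense block adds 4 * 32 = 128 channels and each transition layer halves the count.

num_channels = 64
for i in range(4):
    num_channels += 4 * 32         # dense block
    if i != 3:
        num_channels //= 2         # transition layer
print(num_channels)  # 248 channels feed the final global average pooling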
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def conv_block(num_channels): blk = nn.Sequential() blk.add(nn.BatchNorm(), nn.Activation('relu'), nn.Conv2D(num_channels, kernel_size=3, padding=1)) return blk class DenseBlock(nn.Block): def __init__(self, num_convs, num_channels, **kwargs): super().__init__(**kwargs) self.net = nn.Sequential() for _ in range(num_convs): self.net.add(conv_block(num_channels)) def forward(self, X): for blk in self.net: Y = blk(X) X = np.concatenate((X, Y), axis=1) return X blk = DenseBlock(2, 10) blk.initialize() X = np.random.uniform(size=(4, 3, 8, 8)) Y = blk(X) Y.shape def transition_block(num_channels): blk = nn.Sequential() blk.add(nn.BatchNorm(), nn.Activation('relu'), nn.Conv2D(num_channels, kernel_size=1), nn.AvgPool2D(pool_size=2, strides=2)) return blk blk = transition_block(10) blk.initialize() blk(Y).shape net = nn.Sequential() net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3), nn.BatchNorm(), nn.Activation('relu'), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) num_channels, growth_rate = 64, 32 num_convs_in_dense_blocks = [4, 4, 4, 4] for i, num_convs in enumerate(num_convs_in_dense_blocks): net.add(DenseBlock(num_convs, growth_rate)) num_channels += num_convs * growth_rate if i != len(num_convs_in_dense_blocks) - 1: num_channels //= 2 net.add(transition_block(num_channels)) net.add(nn.BatchNorm(), nn.Activation('relu'), nn.GlobalAvgPool2D(), nn.Dense(10))
%matplotlib inline import torch from torch import nn from d2l import torch as d2l T = 1000 time = torch.arange(1, T + 1, dtype=torch.float32) x = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,)) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) tau = 4 features = torch.zeros((T - tau, tau)) for i in range(tau): features[:, i] = x[i: T - tau + i] labels = x[tau:].reshape((-1, 1)) batch_size, n_train = 16, 600 train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True) def init_weights(m): if type(m) == nn.Linear: nn.init.xavier_uniform_(m.weight) def get_net(): net = nn.Sequential(nn.Linear(4, 10), nn.ReLU(), nn.Linear(10, 1)) net.apply(init_weights) return net loss = nn.MSELoss(reduction='none') def train(net, train_iter, loss, epochs, lr): trainer = torch.optim.Adam(net.parameters(), lr) for epoch in range(epochs): for X, y in train_iter: trainer.zero_grad() l = loss(net(X), y) l.sum().backward() trainer.step() net = get_net() train(net, train_iter, loss, 5, 0.01) onestep_preds = net(features) d2l.plot([time, time[tau:]], [x.detach().numpy(), onestep_preds.detach().numpy()], 'time', 'x', legend=['data', '1-step preds'], xlim=[1, 1000], figsize=(6, 3)) multistep_preds = torch.zeros(T) multistep_preds[: n_train + tau] = x[: n_train + tau] for i in range(n_train + tau, T): multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1))) d2l.plot([time, time[tau:], time[n_train + tau:]], [x.detach().numpy(), onestep_preds.detach().numpy(), multistep_preds[n_train + tau:].detach().numpy()], 'time', 'x', legend=['data', '1-step preds', 'multistep preds'], xlim=[1, 1000], figsize=(6, 3)) max_steps = 64 features = torch.zeros((T - tau - max_steps + 1, tau + max_steps)) for i in range(tau): features[:, i] = x[i: i + T - tau - max_steps + 1] for i in range(tau, tau + max_steps): features[:, i] = net(features[:, i - tau:i]).reshape(-1) steps = (1, 4, 16, 64) d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps], [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x', legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000], figsize=(6, 3))
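A rough way to quantify how quickly the multistep predictions above degrade is to compare each k-step column of `features` against the true series; a sketch, assuming the tensors defined above:

for i in steps:
    pred = features[:, tau + i - 1].detach()
    target = x[tau + i - 1: T - max_steps + i]
    print(f'{i}-step MSE: {((pred - target) ** 2).mean():.4f}')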
%matplotlib inline from mxnet import autograd, gluon, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() T = 1000 time = np.arange(1, T + 1, dtype=np.float32) x = np.sin(0.01 * time) + np.random.normal(0, 0.2, (T,)) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) tau = 4 features = np.zeros((T - tau, tau)) for i in range(tau): features[:, i] = x[i: T - tau + i] labels = x[tau:].reshape((-1, 1)) batch_size, n_train = 16, 600 train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True) def get_net(): net = nn.Sequential() net.add(nn.Dense(10, activation='relu'), nn.Dense(1)) net.initialize(init.Xavier()) return net loss = gluon.loss.L2Loss() def train(net, train_iter, loss, epochs, lr): trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr}) for epoch in range(epochs): for X, y in train_iter: with autograd.record(): l = loss(net(X), y) l.backward() trainer.step(batch_size) net = get_net() train(net, train_iter, loss, 5, 0.01) onestep_preds = net(features) d2l.plot([time, time[tau:]], [x.asnumpy(), onestep_preds.asnumpy()], 'time', 'x', legend=['data', '1-step preds'], xlim=[1, 1000], figsize=(6, 3)) multistep_preds = np.zeros(T) multistep_preds[: n_train + tau] = x[: n_train + tau] for i in range(n_train + tau, T): multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1))) d2l.plot([time, time[tau:], time[n_train + tau:]], [x.asnumpy(), onestep_preds.asnumpy(), multistep_preds[n_train + tau:].asnumpy()], 'time', 'x', legend=['data', '1-step preds', 'multistep preds'], xlim=[1, 1000], figsize=(6, 3)) max_steps = 64 features = np.zeros((T - tau - max_steps + 1, tau + max_steps)) for i in range(tau): features[:, i] = x[i: i + T - tau - max_steps + 1] for i in range(tau, tau + max_steps): features[:, i] = net(features[:, i - tau:i]).reshape(-1) steps = (1, 4, 16, 64) d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps], [features[:, tau + i - 1].asnumpy() for i in steps], 'time', 'x', legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000], figsize=(6, 3))
import collections
import re
from d2l import torch as d2l
import collections
import re
from d2l import mxnet as d2l
import random import torch from d2l import torch as d2l tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): corpus = corpus[random.randint(0, num_steps - 1):] num_subseqs = (len(corpus) - 1) // num_steps initial_indices = list(range(0, num_subseqs * num_steps, num_steps)) random.shuffle(initial_indices) def data(pos): return corpus[pos: pos + num_steps] num_batches = num_subseqs // batch_size for i in range(0, batch_size * num_batches, batch_size): initial_indices_per_batch = initial_indices[i: i + batch_size] X = [data(j) for j in initial_indices_per_batch] Y = [data(j + 1) for j in initial_indices_per_batch] yield torch.tensor(X), torch.tensor(Y) def seq_data_iter_sequential(corpus, batch_size, num_steps): offset = random.randint(0, num_steps) num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size Xs = torch.tensor(corpus[offset: offset + num_tokens]) Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens]) Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1) num_batches = Xs.shape[1] // num_steps for i in range(0, num_steps * num_batches, num_steps): X = Xs[:, i: i + num_steps] Y = Ys[:, i: i + num_steps] yield X, Y
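Both iterators are easiest to understand on a toy corpus; this is the book's own demonstration, a sequence 0..34 cut into minibatches of two subsequences of five steps each:

my_seq = list(range(35))
for X, Y in seq_data_iter_random(my_seq, batch_size=2, num_steps=5):
    print('X: ', X, '\nY:', Y)
for X, Y in seq_data_iter_sequential(my_seq, batch_size=2, num_steps=5):
    print('X: ', X, '\nY:', Y)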
import random from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): corpus = corpus[random.randint(0, num_steps - 1):] num_subseqs = (len(corpus) - 1) // num_steps initial_indices = list(range(0, num_subseqs * num_steps, num_steps)) random.shuffle(initial_indices) def data(pos): return corpus[pos: pos + num_steps] num_batches = num_subseqs // batch_size for i in range(0, batch_size * num_batches, batch_size): initial_indices_per_batch = initial_indices[i: i + batch_size] X = [data(j) for j in initial_indices_per_batch] Y = [data(j + 1) for j in initial_indices_per_batch] yield np.array(X), np.array(Y) def seq_data_iter_sequential(corpus, batch_size, num_steps): offset = random.randint(0, num_steps) num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size Xs = np.array(corpus[offset: offset + num_tokens]) Ys = np.array(corpus[offset + 1: offset + 1 + num_tokens]) Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1) num_batches = Xs.shape[1] // num_steps for i in range(0, num_steps * num_batches, num_steps): X = Xs[:, i: i + num_steps] Y = Ys[:, i: i + num_steps] yield X, Y
import torch
from d2l import torch as d2l

X, W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4))
H, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4))
torch.matmul(X, W_xh) + torch.matmul(H, W_hh)
torch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))
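The two computations are mathematically identical: concatenating the input and the hidden state and multiplying by the stacked weights is the same as summing the two separate matrix products. Up to floating-point error this can be asserted directly:

assert torch.allclose(
    torch.matmul(X, W_xh) + torch.matmul(H, W_hh),
    torch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0)))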
from mxnet import np, npx
from d2l import mxnet as d2l

npx.set_np()
X, W_xh = np.random.normal(0, 1, (3, 1)), np.random.normal(0, 1, (1, 4))
H, W_hh = np.random.normal(0, 1, (3, 4)), np.random.normal(0, 1, (4, 4))
np.dot(X, W_xh) + np.dot(H, W_hh)
np.dot(np.concatenate((X, H), 1), np.concatenate((W_xh, W_hh), 0))
%matplotlib inline import math import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) F.one_hot(torch.tensor([0, 2]), len(vocab)) X = torch.arange(10).reshape((2, 5)) F.one_hot(X.T, 28).shape def get_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return torch.randn(size=shape, device=device) * 0.01 W_xh = normal((num_inputs, num_hiddens)) W_hh = normal((num_hiddens, num_hiddens)) b_h = torch.zeros(num_hiddens, device=device) W_hq = normal((num_hiddens, num_outputs)) b_q = torch.zeros(num_outputs, device=device) params = [W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.requires_grad_(True) return params def init_rnn_state(batch_size, num_hiddens, device): return (torch.zeros((batch_size, num_hiddens), device=device), ) def rnn(inputs, state, params): W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h) Y = torch.mm(H, W_hq) + b_q outputs.append(Y) return torch.cat(outputs, dim=0), (H,) class RNNModelScratch: def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn): self.vocab_size, self.num_hiddens = vocab_size, num_hiddens self.params = get_params(vocab_size, num_hiddens, device) self.init_state, self.forward_fn = init_state, forward_fn def __call__(self, X, state): X = F.one_hot(X.T, self.vocab_size).type(torch.float32) return self.forward_fn(X, state, self.params) def begin_state(self, batch_size, device): return self.init_state(batch_size, self.num_hiddens, device) num_hiddens = 512 net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn) state = net.begin_state(X.shape[0], d2l.try_gpu()) Y, new_state = net(X.to(d2l.try_gpu()), state) Y.shape, len(new_state), new_state[0].shape def predict_ch8(prefix, num_preds, net, vocab, device): state = net.begin_state(batch_size=1, device=device) outputs = [vocab[prefix[0]]] get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1)) for y in prefix[1:]: _, state = net(get_input(), state) outputs.append(vocab[y]) for _ in range(num_preds): y, state = net(get_input(), state) outputs.append(int(y.argmax(dim=1).reshape(1))) return ''.join([vocab.idx_to_token[i] for i in outputs]) def grad_clipping(net, theta): if isinstance(net, nn.Module): params = [p for p in net.parameters() if p.requires_grad] else: params = net.params norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params)) if norm > theta: for param in params: param.grad[:] *= theta / norm def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter): state, timer = None, d2l.Timer() metric = d2l.Accumulator(2) for X, Y in train_iter: if state is None or use_random_iter: state = net.begin_state(batch_size=X.shape[0], device=device) else: if isinstance(net, nn.Module) and not isinstance(state, tuple): state.detach_() else: for s in state: s.detach_() y = Y.T.reshape(-1) X, y = X.to(device), y.to(device) y_hat, state = net(X, state) l = loss(y_hat, y.long()).mean() if isinstance(updater, torch.optim.Optimizer): updater.zero_grad() l.backward() grad_clipping(net, 1) updater.step() else: l.backward() grad_clipping(net, 1) updater(batch_size=1) metric.add(l * y.numel(), y.numel()) return math.exp(metric[0] / metric[1]), metric[1] / timer.stop() def train_ch8(net, train_iter, vocab, lr, num_epochs, device, 
use_random_iter=False): loss = nn.CrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs]) if isinstance(net, nn.Module): updater = torch.optim.SGD(net.parameters(), lr) else: updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size) predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device) for epoch in range(num_epochs): ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter) if (epoch + 1) % 10 == 0: animator.add(epoch + 1, [ppl])
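With all the pieces in place, the scratch model is trained with the book's hyperparameters (500 epochs, learning rate 1); note this takes a while without a GPU:

num_epochs, lr = 500, 1
train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu())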
%matplotlib inline
import math
from mxnet import autograd, gluon, init, np, npx
from d2l import mxnet as d2l

npx.set_np()
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
npx.one_hot(np.array([0, 2]), len(vocab))
X = np.arange(10).reshape((2, 5))
npx.one_hot(X.T, 28).shape

def get_params(vocab_size, num_hiddens, device):
    num_inputs = num_outputs = vocab_size
    def normal(shape):
        return np.random.normal(scale=0.01, size=shape, ctx=device)
    W_xh = normal((num_inputs, num_hiddens))
    W_hh = normal((num_hiddens, num_hiddens))
    b_h = np.zeros(num_hiddens, ctx=device)
    W_hq = normal((num_hiddens, num_outputs))
    b_q = np.zeros(num_outputs, ctx=device)
    params = [W_xh, W_hh, b_h, W_hq, b_q]
    for param in params:
        param.attach_grad()
    return params

def init_rnn_state(batch_size, num_hiddens, device):
    return (np.zeros((batch_size, num_hiddens), ctx=device), )

def rnn(inputs, state, params):
    W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state
    outputs = []
    for X in inputs:
        H = np.tanh(np.dot(X, W_xh) + np.dot(H, W_hh) + b_h)
        Y = np.dot(H, W_hq) + b_q
        outputs.append(Y)
    return np.concatenate(outputs, axis=0), (H,)

class RNNModelScratch:
    def __init__(self, vocab_size, num_hiddens, device, get_params,
                 init_state, forward_fn):
        self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
        self.params = get_params(vocab_size, num_hiddens, device)
        self.init_state, self.forward_fn = init_state, forward_fn

    def __call__(self, X, state):
        X = npx.one_hot(X.T, self.vocab_size)
        return self.forward_fn(X, state, self.params)

    def begin_state(self, batch_size, ctx):
        return self.init_state(batch_size, self.num_hiddens, ctx)

num_hiddens = 512
net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params,
                      init_rnn_state, rnn)
state = net.begin_state(X.shape[0], d2l.try_gpu())
Y, new_state = net(X.as_in_context(d2l.try_gpu()), state)
Y.shape, len(new_state), new_state[0].shape

def predict_ch8(prefix, num_preds, net, vocab, device):
    state = net.begin_state(batch_size=1, ctx=device)
    outputs = [vocab[prefix[0]]]
    get_input = lambda: np.array([outputs[-1]], ctx=device).reshape((1, 1))
    for y in prefix[1:]:  # warm-up period
        _, state = net(get_input(), state)
        outputs.append(vocab[y])
    for _ in range(num_preds):
        y, state = net(get_input(), state)
        outputs.append(int(y.argmax(axis=1).reshape(1)))
    return ''.join([vocab.idx_to_token[i] for i in outputs])

def grad_clipping(net, theta):
    if isinstance(net, gluon.Block):
        params = [p.data() for p in net.collect_params().values()]
    else:
        params = net.params
    norm = math.sqrt(sum((p.grad ** 2).sum() for p in params))
    if norm > theta:
        for param in params:
            param.grad[:] *= theta / norm

def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):
    state, timer = None, d2l.Timer()
    metric = d2l.Accumulator(2)
    for X, Y in train_iter:
        if state is None or use_random_iter:
            state = net.begin_state(batch_size=X.shape[0], ctx=device)
        else:
            for s in state:
                s.detach()
        y = Y.T.reshape(-1)
        X, y = X.as_in_ctx(device), y.as_in_ctx(device)
        with autograd.record():
            y_hat, state = net(X, state)
            l = loss(y_hat, y).mean()
        l.backward()
        grad_clipping(net, 1)
        updater(batch_size=1)
        metric.add(l * d2l.size(y), d2l.size(y))
    return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()

def train_ch8(net, train_iter, vocab, lr, num_epochs, device,
              use_random_iter=False):
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',
                            legend=['train'], xlim=[10, num_epochs])
    if isinstance(net, gluon.Block):
        net.initialize(ctx=device, force_reinit=True, init=init.Normal(0.01))
        trainer = gluon.Trainer(net.collect_params(), 'sgd',
                                {'learning_rate': lr})
        updater = lambda batch_size: trainer.step(batch_size)
    else:
        updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
    predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)
    for epoch in range(num_epochs):
        ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device,
                                     use_random_iter)
        if (epoch + 1) % 10 == 0:
            animator.add(epoch + 1, [ppl])
import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_layer = nn.RNN(len(vocab), num_hiddens) state = torch.zeros((1, batch_size, num_hiddens)) state.shape X = torch.rand(size=(num_steps, batch_size, len(vocab))) Y, state_new = rnn_layer(X, state) Y.shape, state_new.shape class RNNModel(nn.Module): def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.num_hiddens = self.rnn.hidden_size if not self.rnn.bidirectional: self.num_directions = 1 self.linear = nn.Linear(self.num_hiddens, self.vocab_size) else: self.num_directions = 2 self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size) def forward(self, inputs, state): X = F.one_hot(inputs.T.long(), self.vocab_size) X = X.to(torch.float32) Y, state = self.rnn(X, state) output = self.linear(Y.reshape((-1, Y.shape[-1]))) return output, state def begin_state(self, device, batch_size=1): if not isinstance(self.rnn, nn.LSTM): return torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device) else: return (torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device), torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device)) device = d2l.try_gpu() net = RNNModel(rnn_layer, vocab_size=len(vocab)) net = net.to(device) d2l.predict_ch8('time traveller', 10, net, vocab, device)
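The concise model is trained exactly like the scratch version; these are the book's hyperparameters, reusing `net` and `device` from above:

num_epochs, lr = 500, 1
d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)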
from mxnet import np, npx from mxnet.gluon import nn, rnn from d2l import mxnet as d2l npx.set_np() batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_layer = rnn.RNN(num_hiddens) rnn_layer.initialize() state = rnn_layer.begin_state(batch_size=batch_size) len(state), state[0].shape X = np.random.uniform(size=(num_steps, batch_size, len(vocab))) Y, state_new = rnn_layer(X, state) Y.shape, len(state_new), state_new[0].shape class RNNModel(nn.Block): def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.dense = nn.Dense(vocab_size) def forward(self, inputs, state): X = npx.one_hot(inputs.T, self.vocab_size) Y, state = self.rnn(X, state) output = self.dense(Y.reshape(-1, Y.shape[-1])) return output, state def begin_state(self, *args, **kwargs): return self.rnn.begin_state(*args, **kwargs) device = d2l.try_gpu() net = RNNModel(rnn_layer, len(vocab)) net.initialize(force_reinit=True, ctx=device) d2l.predict_ch8('time traveller', 10, net, vocab, device)
import torch
from torch import nn
from d2l import torch as d2l

batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

def get_params(vocab_size, num_hiddens, device):
    num_inputs = num_outputs = vocab_size
    def normal(shape):
        return torch.randn(size=shape, device=device) * 0.01
    def three():
        return (normal((num_inputs, num_hiddens)),
                normal((num_hiddens, num_hiddens)),
                torch.zeros(num_hiddens, device=device))
    W_xz, W_hz, b_z = three()  # update gate parameters
    W_xr, W_hr, b_r = three()  # reset gate parameters
    W_xh, W_hh, b_h = three()  # candidate hidden state parameters
    W_hq = normal((num_hiddens, num_outputs))
    b_q = torch.zeros(num_outputs, device=device)
    params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]
    for param in params:
        param.requires_grad_(True)
    return params

def init_gru_state(batch_size, num_hiddens, device):
    return (torch.zeros((batch_size, num_hiddens), device=device), )

def gru(inputs, state, params):
    W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state
    outputs = []
    for X in inputs:
        Z = torch.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)
        R = torch.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)
        H_tilda = torch.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)
        H = Z * H + (1 - Z) * H_tilda
        Y = H @ W_hq + b_q
        outputs.append(Y)
    return torch.cat(outputs, dim=0), (H,)

# Hyperparameters as in the book
vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()
num_epochs, lr = 500, 1

num_inputs = vocab_size
gru_layer = nn.GRU(num_inputs, num_hiddens)
model = d2l.RNNModel(gru_layer, len(vocab))
model = model.to(device)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
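The scratch functions above are tied together with `d2l.RNNModelScratch`, just as in the plain-RNN chapter:

model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params,
                            init_gru_state, gru)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)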
from mxnet import np, npx from mxnet.gluon import rnn from d2l import mxnet as d2l npx.set_np() batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return np.random.normal(scale=0.01, size=shape, ctx=device) def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device)) W_xz, W_hz, b_z = three() W_xr, W_hr, b_r = three() W_xh, W_hh, b_h = three() W_hq = normal((num_hiddens, num_outputs)) b_q = np.zeros(num_outputs, ctx=device) params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.attach_grad() return params def init_gru_state(batch_size, num_hiddens, device): return (np.zeros(shape=(batch_size, num_hiddens), ctx=device), ) def gru(inputs, state, params): W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: Z = npx.sigmoid(np.dot(X, W_xz) + np.dot(H, W_hz) + b_z) R = npx.sigmoid(np.dot(X, W_xr) + np.dot(H, W_hr) + b_r) H_tilda = np.tanh(np.dot(X, W_xh) + np.dot(R * H, W_hh) + b_h) H = Z * H + (1 - Z) * H_tilda Y = np.dot(H, W_hq) + b_q outputs.append(Y) return np.concatenate(outputs, axis=0), (H,) gru_layer = rnn.GRU(num_hiddens) model = d2l.RNNModel(gru_layer, len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
import torch
from torch import nn
from d2l import torch as d2l

batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

def get_lstm_params(vocab_size, num_hiddens, device):
    num_inputs = num_outputs = vocab_size
    def normal(shape):
        return torch.randn(size=shape, device=device) * 0.01
    def three():
        return (normal((num_inputs, num_hiddens)),
                normal((num_hiddens, num_hiddens)),
                torch.zeros(num_hiddens, device=device))
    W_xi, W_hi, b_i = three()  # input gate parameters
    W_xf, W_hf, b_f = three()  # forget gate parameters
    W_xo, W_ho, b_o = three()  # output gate parameters
    W_xc, W_hc, b_c = three()  # candidate memory cell parameters
    W_hq = normal((num_hiddens, num_outputs))
    b_q = torch.zeros(num_outputs, device=device)
    params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o,
              W_xc, W_hc, b_c, W_hq, b_q]
    for param in params:
        param.requires_grad_(True)
    return params

def init_lstm_state(batch_size, num_hiddens, device):
    return (torch.zeros((batch_size, num_hiddens), device=device),
            torch.zeros((batch_size, num_hiddens), device=device))

def lstm(inputs, state, params):
    [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o,
     W_xc, W_hc, b_c, W_hq, b_q] = params
    (H, C) = state
    outputs = []
    for X in inputs:
        I = torch.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)
        F = torch.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)
        O = torch.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)
        C_tilda = torch.tanh((X @ W_xc) + (H @ W_hc) + b_c)
        C = F * C + I * C_tilda
        H = O * torch.tanh(C)
        Y = (H @ W_hq) + b_q
        outputs.append(Y)
    return torch.cat(outputs, dim=0), (H, C)

# Hyperparameters as in the book
vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()
num_epochs, lr = 500, 1

num_inputs = vocab_size
lstm_layer = nn.LSTM(num_inputs, num_hiddens)
model = d2l.RNNModel(lstm_layer, len(vocab))
model = model.to(device)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
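As with the GRU, the scratch LSTM pieces plug into `d2l.RNNModelScratch`:

model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params,
                            init_lstm_state, lstm)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)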
from mxnet import np, npx from mxnet.gluon import rnn from d2l import mxnet as d2l npx.set_np() batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_lstm_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return np.random.normal(scale=0.01, size=shape, ctx=device) def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device)) W_xi, W_hi, b_i = three() W_xf, W_hf, b_f = three() W_xo, W_ho, b_o = three() W_xc, W_hc, b_c = three() W_hq = normal((num_hiddens, num_outputs)) b_q = np.zeros(num_outputs, ctx=device) params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] for param in params: param.attach_grad() return params def init_lstm_state(batch_size, num_hiddens, device): return (np.zeros((batch_size, num_hiddens), ctx=device), np.zeros((batch_size, num_hiddens), ctx=device)) def lstm(inputs, state, params): [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] = params (H, C) = state outputs = [] for X in inputs: I = npx.sigmoid(np.dot(X, W_xi) + np.dot(H, W_hi) + b_i) F = npx.sigmoid(np.dot(X, W_xf) + np.dot(H, W_hf) + b_f) O = npx.sigmoid(np.dot(X, W_xo) + np.dot(H, W_ho) + b_o) C_tilda = np.tanh(np.dot(X, W_xc) + np.dot(H, W_hc) + b_c) C = F * C + I * C_tilda H = O * np.tanh(C) Y = np.dot(H, W_hq) + b_q outputs.append(Y) return np.concatenate(outputs, axis=0), (H, C) lstm_layer = rnn.LSTM(num_hiddens) model = d2l.RNNModel(lstm_layer, len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
import os
import torch
from d2l import torch as d2l

def build_array_nmt(lines, vocab, num_steps):
    """Transform text sequences of machine translation into minibatches."""
    lines = [vocab[l] for l in lines]
    lines = [l + [vocab['<eos>']] for l in lines]
    array = torch.tensor([d2l.truncate_pad(
        l, num_steps, vocab['<pad>']) for l in lines])
    valid_len = (array != vocab['<pad>']).type(torch.int32).sum(1)
    return array, valid_len

train_iter, src_vocab, tgt_vocab = d2l.load_data_nmt(batch_size=2, num_steps=8)
for X, X_valid_len, Y, Y_valid_len in train_iter:
    print('X:', X.type(torch.int32))
    print('Valid length of X:', X_valid_len)
    print('Y:', Y.type(torch.int32))
    print('Valid length of Y:', Y_valid_len)
    break
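`d2l.truncate_pad` used above either truncates a token list to `num_steps` or pads it with the padding token; a tiny sanity check:

d2l.truncate_pad([1, 2, 3], 5, 0)            # -> [1, 2, 3, 0, 0]
d2l.truncate_pad([1, 2, 3, 4, 5, 6], 5, 0)   # -> [1, 2, 3, 4, 5]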
import os
from mxnet import np, npx
from d2l import mxnet as d2l

npx.set_np()

def build_array_nmt(lines, vocab, num_steps):
    """Transform text sequences of machine translation into minibatches."""
    lines = [vocab[l] for l in lines]
    lines = [l + [vocab['<eos>']] for l in lines]
    array = np.array([d2l.truncate_pad(
        l, num_steps, vocab['<pad>']) for l in lines])
    valid_len = (array != vocab['<pad>']).astype(np.int32).sum(1)
    return array, valid_len

train_iter, src_vocab, tgt_vocab = d2l.load_data_nmt(batch_size=2, num_steps=8)
for X, X_valid_len, Y, Y_valid_len in train_iter:
    print('X:', X.astype(np.int32))
    print('Valid length of X:', X_valid_len)
    print('Y:', Y.astype(np.int32))
    print('Valid length of Y:', Y_valid_len)
    break
x = torch.arange(12) X = x.reshape(3, 4) torch.zeros((2, 3, 4)) torch.ones((2, 3, 4)) torch.randn(3, 4) torch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = torch.tensor([1.0, 2, 4, 8]) y = torch.tensor([2, 2, 2, 2]) x + y, x - y, x * y, x / y, x ** y torch.exp(x) X = torch.arange(12, dtype=torch.float32).reshape((3,4)) Y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) torch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1) a = torch.arange(3).reshape((3, 1)) b = torch.arange(2).reshape((1, 2)) Z = torch.zeros_like(Y) Z[:] = X + Y A = X.numpy() B = torch.tensor(A) a = torch.tensor([3.5]) print(a, a.item(), float(a), int(a))
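The `(3, 1)` and `(1, 2)` tensors set up the broadcasting demonstration, but the cell never shows the result; reconstructing it (`a` is redefined here because the cell later reuses the name for a scalar):

a = torch.arange(3).reshape((3, 1))
b = torch.arange(2).reshape((1, 2))
a + b   # each operand is replicated along its size-1 axis, giving shape (3, 2)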
x = paddle.arange(12) X = paddle.reshape(x, (3, 4)) paddle.zeros((2, 3, 4)) paddle.ones((2, 3, 4)) paddle.randn((3, 4),'float32') paddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = paddle.to_tensor([1.0, 2, 4, 8]) y = paddle.to_tensor([2, 2, 2, 2]) x + y, x - y, x * y, x / y, x**y paddle.exp(x) X = paddle.arange(12, dtype='float32').reshape((3, 4)) Y = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) paddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1) a = paddle.reshape(paddle.arange(3), (3, 1)) b = paddle.reshape(paddle.arange(2), (1, 2)) Z = paddle.zeros_like(Y) Z = X + Y A = X.numpy() B = paddle.to_tensor(A) type(A), type(B) a = paddle.to_tensor([3.5]) a, a.item(), float(a), int(a)
import torch

# `inputs` and `outputs` are the preprocessed pandas frames from the
# preceding data-preprocessing step (elided here).
X, y = torch.tensor(inputs.values), torch.tensor(outputs.values)
import warnings
warnings.filterwarnings("ignore")
import paddle

# `inputs` and `outputs` are the preprocessed pandas frames from the
# preceding data-preprocessing step (elided here).
X, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)
import torch x = torch.tensor(3.0) y = torch.tensor(2.0) print(x + y, x * y, x / y, x**y) x = torch.arange(4) A = torch.arange(20).reshape(5, 4) A.T B = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) B == B.T X = torch.arange(24).reshape(2, 3, 4) A = torch.arange(20, dtype=torch.float32).reshape(5, 4) B = A.clone() print(A, A + B) a = 2 X = torch.arange(24).reshape(2, 3, 4) print(a + X, (a * X).shape) x = torch.arange(4, dtype=torch.float32) print(x, x.sum()) a = A.sum() A.mean() A.sum() / A.numel() A.mean(axis=0) A.sum(axis=0) / A.shape[0] sum_A = A.sum(axis=1, keepdims=True) y = torch.ones(4, dtype = torch.float32) print(torch.dot(x, y)) torch.sum(x * y) A.shape, x.shape, torch.mv(A, x) B = torch.ones(4, 3) torch.mm(A, B) u = torch.tensor([3.0, -4.0]) torch.norm(u) torch.abs(u).sum() torch.norm(torch.ones((4, 9)))
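One relation worth checking by hand: `torch.norm` on a vector is the square root of the sum of squares, so the L2 norm of u = (3, -4) is exactly 5:

u = torch.tensor([3.0, -4.0])
assert torch.sqrt((u ** 2).sum()) == torch.norm(u)  # both equal 5.0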
import warnings
warnings.filterwarnings("ignore")
import paddle

x = paddle.to_tensor([3.0])
y = paddle.to_tensor([2.0])
x + y, x * y, x / y, x**y
x = paddle.arange(4)
A = paddle.reshape(paddle.arange(20), (5, 4))
paddle.transpose(A, perm=[1, 0])
B = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])
B == paddle.transpose(B, perm=[1, 0])
X = paddle.reshape(paddle.arange(24), (2, 3, 4))
A = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))
B = A.clone()
A, A + B
a = 2
X = paddle.reshape(paddle.arange(24), (2, 3, 4))
a + X, (a * X).shape
x = paddle.arange(4, dtype=paddle.float32)
print(x, x.sum())
A.shape, A.sum()
A.mean(), A.sum() / A.numel()
A.mean(axis=0), A.sum(axis=0) / A.shape[0]
sum_A = paddle.sum(A, axis=1, keepdim=True)
y = paddle.ones(shape=[4], dtype='float32')
x, y, paddle.dot(x, y)
paddle.sum(x * y)
A.shape, x.shape, paddle.mv(A, x)
B = paddle.ones(shape=[4, 3], dtype='float32')
paddle.mm(A, B)
u = paddle.to_tensor([3.0, -4.0])
paddle.norm(u)
paddle.abs(u).sum()
paddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))
%matplotlib inline
import numpy as np
from matplotlib_inline import backend_inline
from d2l import torch as d2l

def f(x):
    return 3 * x ** 2 - 4 * x

def numerical_lim(f, x, h):
    return (f(x + h) - f(x)) / h

h = 0.1
for i in range(5):
    print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')
    h *= 0.1
%matplotlib inline
import numpy as np
from matplotlib_inline import backend_inline
from d2l import paddle as d2l

def f(x):
    return 3 * x ** 2 - 4 * x

def numerical_lim(f, x, h):
    return (f(x + h) - f(x)) / h

h = 0.1
for i in range(5):
    print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')
    h *= 0.1
import torch

x = torch.arange(4.0)
x.requires_grad_(True)
x.grad            # defaults to None
y = 2 * torch.dot(x, x)
y.backward()      # populate x.grad (dy/dx = 4x) before it can be reset
x.grad

x.grad.zero_()
y = x.sum()
y.backward()
x.grad

x.grad.zero_()
y = x * x
y.sum().backward()
x.grad

x.grad.zero_()
y = x * x
u = y.detach()    # treat u as a constant in the graph
z = u * x
z.sum().backward()
x.grad == u

x.grad.zero_()
y.sum().backward()
x.grad == 2 * x

def f(a):
    b = a * 2
    while b.norm() < 1000:
        b = b * 2
    if b.sum() > 0:
        c = b
    else:
        c = 100 * b
    return c

a = torch.randn(size=(), requires_grad=True)
d = f(a)
d.backward()
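Even though `f` contains Python control flow, it is piecewise linear in `a`, so the gradient is simply `d / a`; the book verifies this directly:

a.grad == d / a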
import warnings
warnings.filterwarnings("ignore")
import paddle

x = paddle.arange(4, dtype='float32')
x = paddle.to_tensor(x, stop_gradient=False)
y = 2 * paddle.dot(x, x)
y.backward()          # populate x.grad (dy/dx = 4x) before it can be reset
x.grad

x.clear_gradient()
y = paddle.sum(x)
y.backward()
x.grad

x.clear_gradient()
y = x * x
paddle.sum(y).backward()
x.grad

x.clear_gradient()
y = x * x
u = y.detach()        # treat u as a constant in the graph
z = u * x
paddle.sum(z).backward()
x.grad == u

x.clear_gradient()
paddle.sum(y).backward()
x.grad == 2 * x

def f(a):
    b = a * 2
    while paddle.norm(b) < 1000:
        b = b * 2
    if paddle.sum(b) > 0:
        c = b
    else:
        c = 100 * b
    return c

a = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)
d = f(a)
d.backward()
%matplotlib inline
import torch
from torch.distributions import multinomial
from d2l import torch as d2l

fair_probs = torch.ones([6]) / 6
multinomial.Multinomial(1, fair_probs).sample()
multinomial.Multinomial(10, fair_probs).sample()
counts = multinomial.Multinomial(1000, fair_probs).sample()
%matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import random
import numpy as np
import paddle

fair_probs = [1.0 / 6] * 6
paddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample()
counts = paddle.distribution.Multinomial(
    1000, paddle.to_tensor(fair_probs)).sample()
counts / 1000
counts = multinomial.Multinomial(10, fair_probs).sample((500,)) cum_counts = counts.cumsum(dim=0) estimates = cum_counts / cum_counts.sum(dim=1, keepdims=True) d2l.set_figsize((6, 4.5)) for i in range(6): d2l.plt.plot(estimates[:, i].numpy(), label=("P(die=" + str(i + 1) + ")")) d2l.plt.axhline(y=0.167, color='black', linestyle='dashed') d2l.plt.gca().set_xlabel('Groups of experiments') d2l.plt.gca().set_ylabel('Estimated probability') d2l.plt.legend(); import torch a = dir(torch.distributions) help(torch.ones) torch.ones(4)
counts = paddle.distribution.Multinomial(
    10, paddle.to_tensor(fair_probs)).sample((500, 1))
cum_counts = counts.cumsum(axis=0)
cum_counts = cum_counts.squeeze(axis=1)
estimates = cum_counts / cum_counts.sum(axis=1, keepdim=True)
d2l.set_figsize((6, 4.5))
for i in range(6):
    d2l.plt.plot(estimates[:, i], label=("P(die=" + str(i + 1) + ")"))
d2l.plt.axhline(y=0.167, color='black', linestyle='dashed')
d2l.plt.gca().set_xlabel('Groups of experiments')
d2l.plt.gca().set_ylabel('Estimated probability')
d2l.plt.legend()

import warnings
warnings.filterwarnings("ignore")
import paddle

help(paddle.ones)
paddle.ones([4], dtype='float32')
%matplotlib inline
import math
import time
import numpy as np
import torch
from d2l import torch as d2l

n = 10000
a = torch.ones(n)
b = torch.ones(n)
c = torch.zeros(n)

# Element-wise loop vs. vectorized addition
timer = d2l.Timer()
for i in range(n):
    c[i] = a[i] + b[i]
print(f'{timer.stop():.5f} sec')
timer.start()
d = a + b
print(f'{timer.stop():.5f} sec')

def normal(x, mu, sigma):
    p = 1 / math.sqrt(2 * math.pi * sigma**2)
    return p * np.exp(-0.5 / sigma**2 * (x - mu)**2)

x = np.arange(-7, 7, 0.01)
params = [(0, 1), (0, 2), (3, 1)]
d2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',
         ylabel='p(x)', figsize=(4.5, 2.5),
         legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])
%matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import math
import time
import numpy as np
import paddle

n = 10000
a = paddle.ones([n])
b = paddle.ones([n])
c = paddle.zeros([n])

# Element-wise loop vs. vectorized addition (Timer ships with d2l)
timer = d2l.Timer()
for i in range(n):
    c[i] = a[i] + b[i]
print(f'{timer.stop():.5f} sec')
timer.start()
d = a + b
print(f'{timer.stop():.5f} sec')

def normal(x, mu, sigma):
    p = 1 / math.sqrt(2 * math.pi * sigma**2)
    return p * np.exp(-0.5 / sigma**2 * (x - mu)**2)

x = np.arange(-7, 7, 0.01)
params = [(0, 1), (0, 2), (3, 1)]
d2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',
         ylabel='p(x)', figsize=(4.5, 2.5),
         legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])
%matplotlib inline
import random
import torch
from d2l import torch as d2l

def synthetic_data(w, b, num_examples):
    """Generate y = Xw + b + noise."""
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)
    return X, y.reshape((-1, 1))

true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
d2l.set_figsize()
d2l.plt.scatter(features[:, 1].detach().numpy(),
                labels.detach().numpy(), 1);

def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # read the examples in random order
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(
            indices[i: min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]

batch_size = 10
for X, y in data_iter(batch_size, features, labels):
    print(X, '\n', y)
    break

w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)

def linreg(X, w, b):
    return torch.matmul(X, w) + b

def squared_loss(y_hat, y):
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2

def sgd(params, lr, batch_size):
    """Minibatch stochastic gradient descent."""
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size
            param.grad.zero_()

lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)
        l.sum().backward()
        sgd([w, b], lr, batch_size)
    with torch.no_grad():
        train_l = loss(net(features, w, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
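After three epochs the learned parameters should be close to the generating ones; the book compares them directly:

print(f'error in estimating w: {true_w - w.reshape(true_w.shape)}')
print(f'error in estimating b: {true_b - b}')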
%matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import random
import paddle

def synthetic_data(w, b, num_examples):
    """Generate y = Xw + b + noise."""
    X = paddle.normal(0, 1, (num_examples, len(w)))
    y = paddle.matmul(X, w) + b
    y += paddle.normal(0, 0.01, y.shape)
    return X, y.reshape((-1, 1))

true_w = paddle.to_tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
d2l.set_figsize()
d2l.plt.scatter(features[:, 1].detach().numpy(),
                labels.detach().numpy(), 1);

def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        batch_indices = paddle.to_tensor(
            indices[i: min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]

batch_size = 10
for X, y in data_iter(batch_size, features, labels):
    break

w = paddle.normal(0, 0.01, shape=(2, 1))
b = paddle.zeros(shape=[1])
w.stop_gradient = False
b.stop_gradient = False

def linreg(X, w, b):
    return paddle.matmul(X, w) + b

def squared_loss(y_hat, y):
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2

def sgd(params, lr, batch_size):
    """Minibatch stochastic gradient descent."""
    with paddle.no_grad():
        for i, param in enumerate(params):
            param -= lr * params[i].grad / batch_size
            params[i].set_value(param)
            params[i].clear_gradient()

lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)
        l.sum().backward()
        sgd([w, b], lr, batch_size)
    with paddle.no_grad():
        train_l = loss(net(features, w, b), labels)
import numpy as np import torch from torch.utils import data from d2l import torch as d2l true_w = torch.tensor([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) def load_array(data_arrays, batch_size, is_train=True): dataset = data.TensorDataset(*data_arrays) return data.DataLoader(dataset, batch_size, shuffle=is_train) batch_size = 10 data_iter = load_array((features, labels), batch_size) from torch import nn net = nn.Sequential(nn.Linear(2, 1)) net[0].weight.data.normal_(0, 0.01) net[0].bias.data.fill_(0) trainer = torch.optim.SGD(net.parameters(), lr=0.03) w = net[0].weight.data b = net[0].bias.data
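The snippet stops right before the training loop; for completeness, this is the loop as the book writes it, reusing `net`, `trainer`, and `data_iter` from above:

loss = nn.MSELoss()
num_epochs = 3
for epoch in range(num_epochs):
    for X, y in data_iter:
        l = loss(net(X), y)
        trainer.zero_grad()
        l.backward()
        trainer.step()
    l = loss(net(features), labels)
    print(f'epoch {epoch + 1}, loss {l:f}')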
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import numpy as np import paddle true_w = paddle.to_tensor([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) def load_array(data_arrays, batch_size, is_train=True): dataset = paddle.io.TensorDataset(data_arrays) return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True) batch_size = 10 data_iter = load_array((features, labels), batch_size) from paddle import nn net = nn.Sequential(nn.Linear(2, 1)) weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01)) bias_attr = paddle.ParamAttr(initializer=None) net = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr)) trainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters()) w = net[0].weight b = net[0].bias
%matplotlib inline
import torch
import torchvision
from torch.utils import data
from torchvision import transforms
from d2l import torch as d2l

d2l.use_svg_display()
trans = transforms.ToTensor()
mnist_train = torchvision.datasets.FashionMNIST(
    root="../data", train=True, transform=trans, download=True)
mnist_test = torchvision.datasets.FashionMNIST(
    root="../data", train=False, transform=trans, download=True)

def get_fashion_mnist_labels(labels):
    """Return text labels for the Fashion-MNIST dataset."""
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in labels]

def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for i, (ax, img) in enumerate(zip(axes, imgs)):
        if torch.is_tensor(img):
            ax.imshow(img.numpy())
        else:
            ax.imshow(img)
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[i])
    return axes

X, y = next(iter(data.DataLoader(mnist_train, batch_size=18)))
show_images(X.reshape(18, 28, 28), 2, 9, titles=get_fashion_mnist_labels(y));

batch_size = 256

def get_dataloader_workers():
    """Use 4 processes to read the data."""
    return 4

train_iter = data.DataLoader(mnist_train, batch_size, shuffle=True,
                             num_workers=get_dataloader_workers())

def load_data_fashion_mnist(batch_size, resize=None):
    """Download the Fashion-MNIST dataset and load it into memory."""
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))
    trans = transforms.Compose(trans)
    mnist_train = torchvision.datasets.FashionMNIST(
        root="../data", train=True, transform=trans, download=True)
    mnist_test = torchvision.datasets.FashionMNIST(
        root="../data", train=False, transform=trans, download=True)
    return (data.DataLoader(mnist_train, batch_size, shuffle=True,
                            num_workers=get_dataloader_workers()),
            data.DataLoader(mnist_test, batch_size, shuffle=False,
                            num_workers=get_dataloader_workers()))
%matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import sys
import paddle
from paddle.vision import transforms

d2l.use_svg_display()
trans = transforms.ToTensor()
mnist_train = paddle.vision.datasets.FashionMNIST(mode="train", transform=trans)
mnist_test = paddle.vision.datasets.FashionMNIST(mode="test", transform=trans)

def get_fashion_mnist_labels(labels):
    """Return text labels for the Fashion-MNIST dataset."""
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in labels]

def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for i, (ax, img) in enumerate(zip(axes, imgs)):
        if paddle.is_tensor(img):
            ax.imshow(img.numpy())
        else:
            ax.imshow(img)
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[i])
    return axes

X, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))
show_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));

batch_size = 256

def get_dataloader_workers():
    """Use 4 processes to read the data."""
    return 4

train_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size,
                                  shuffle=True, return_list=True,
                                  num_workers=get_dataloader_workers())

def load_data_fashion_mnist(batch_size, resize=None):
    """Download the Fashion-MNIST dataset and load it into memory."""
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))
    trans = transforms.Compose(trans)
    mnist_train = paddle.vision.datasets.FashionMNIST(mode="train",
                                                      transform=trans)
    mnist_test = paddle.vision.datasets.FashionMNIST(mode="test",
                                                     transform=trans)
    return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size,
                                 shuffle=True, return_list=True,
                                 num_workers=get_dataloader_workers()),
            paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size,
                                 shuffle=False, return_list=True,
                                 num_workers=get_dataloader_workers()))
import torch from IPython import display from d2l import torch as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True) b = torch.zeros(num_outputs, requires_grad=True) X = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) X.sum(0, keepdim=True), X.sum(1, keepdim=True) def softmax(X): X_exp = torch.exp(X) partition = X_exp.sum(1, keepdim=True) return X_exp / partition X = torch.normal(0, 1, (2, 5)) X_prob = softmax(X) X_prob, X_prob.sum(1) def net(X): return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b) y = torch.tensor([0, 2]) y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]]) y_hat[[0, 1], y] def cross_entropy(y_hat, y): return - torch.log(y_hat[range(len(y_hat)), y]) cross_entropy(y_hat, y) def accuracy(y_hat, y): if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: y_hat = y_hat.argmax(axis=1) cmp = y_hat.type(y.dtype) == y return float(cmp.type(y.dtype).sum()) def evaluate_accuracy(net, data_iter): if isinstance(net, torch.nn.Module): net.eval() metric = Accumulator(2) with torch.no_grad(): for X, y in data_iter: metric.add(accuracy(net(X), y), y.numel()) return metric[0] / metric[1] def train_epoch_ch3(net, train_iter, loss, updater): if isinstance(net, torch.nn.Module): net.train() metric = Accumulator(3) for X, y in train_iter: y_hat = net(X) l = loss(y_hat, y) if isinstance(updater, torch.optim.Optimizer): updater.zero_grad() l.mean().backward() updater.step() else: l.sum().backward() updater(X.shape[0]) metric.add(float(l.sum()), accuracy(y_hat, y), y.numel()) return metric[0] / metric[2], metric[1] / metric[2]
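The `Accumulator` referenced in `evaluate_accuracy` and `train_epoch_ch3` is a small helper the book defines alongside them (it also ships as `d2l.Accumulator`):

class Accumulator:
    """For accumulating sums over `n` variables."""
    def __init__(self, n):
        self.data = [0.0] * n
    def add(self, *args):
        self.data = [a + float(b) for a, b in zip(self.data, args)]
    def reset(self):
        self.data = [0.0] * len(self.data)
    def __getitem__(self, idx):
        return self.data[idx]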
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from IPython import display

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs = 784
num_outputs = 10
W = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))
b = paddle.zeros(shape=(num_outputs,))
W.stop_gradient = False
b.stop_gradient = False

X = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
X.sum(0, keepdim=True), X.sum(1, keepdim=True)

def softmax(X):
    X_exp = paddle.exp(X)
    partition = X_exp.sum(1, keepdim=True)
    return X_exp / partition  # broadcasting applies here

X = paddle.normal(0, 1, (2, 5))
X_prob = softmax(X)
X_prob, X_prob.sum(1)

def net(X):
    return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b)

y = paddle.to_tensor([0, 2])
y_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
y_hat[[0, 1], y]

def cross_entropy(y_hat, y):
    return -paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])

cross_entropy(y_hat, y)

def accuracy(y_hat, y):
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = y_hat.argmax(axis=1)
    if len(y_hat.shape) < len(y.shape):
        cmp = y_hat.astype(y.dtype) == y.squeeze()
    else:
        cmp = y_hat.astype(y.dtype) == y
    return float(cmp.astype(y.dtype).sum())

def evaluate_accuracy(net, data_iter):
    if isinstance(net, paddle.nn.Layer):
        net.eval()
    metric = d2l.Accumulator(2)
    with paddle.no_grad():
        for X, y in data_iter:
            metric.add(accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]

def train_epoch_ch3(net, train_iter, loss, updater):
    if isinstance(net, paddle.nn.Layer):
        net.train()
    metric = d2l.Accumulator(3)
    for X, y in train_iter:
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(updater, paddle.optimizer.Optimizer):
            updater.clear_grad()
            l.mean().backward()
            updater.step()
        else:
            l.sum().backward()
            updater(X.shape[0])
        metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    return metric[0] / metric[2], metric[1] / metric[2]
import torch from torch import nn from d2l import torch as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10)) def init_weights(m): if type(m) == nn.Linear: nn.init.normal_(m.weight, std=0.01) net.apply(init_weights); trainer = torch.optim.SGD(net.parameters(), lr=0.1)
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))

def init_weights(m):
    if type(m) == nn.Linear:
        # Paddle initializers are applied by calling them on the parameter.
        nn.initializer.Normal(mean=0.0, std=0.01)(m.weight)

net.apply(init_weights);
trainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())
%matplotlib inline import torch from d2l import torch as d2l x = torch.arange(-8.0, 8.0, 0.1, requires_grad=True) y = torch.relu(x) d2l.plot(x.detach(), y.detach(), 'x', 'relu(x)', figsize=(5, 2.5)) y.backward(torch.ones_like(x), retain_graph=True) d2l.plot(x.detach(), x.grad, 'x', 'grad of relu', figsize=(5, 2.5)) y = torch.sigmoid(x) d2l.plot(x.detach(), y.detach(), 'x', 'sigmoid(x)', figsize=(5, 2.5)) x.grad.data.zero_() y.backward(torch.ones_like(x),retain_graph=True) d2l.plot(x.detach(), x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5)) y = torch.tanh(x) d2l.plot(x.detach(), y.detach(), 'x', 'tanh(x)', figsize=(5, 2.5)) x.grad.data.zero_() y.backward(torch.ones_like(x),retain_graph=True) d2l.plot(x.detach(), x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle x = paddle.arange(-8.0, 8.0, 0.1, dtype='float32') x.stop_gradient = False y = paddle.nn.functional.relu(x) d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5)) y.backward(paddle.ones_like(x), retain_graph=True) d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5)) y = paddle.nn.functional.sigmoid(x) d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5)) x.clear_gradient() y.backward(paddle.ones_like(x), retain_graph=True) d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5)) y = paddle.tanh(x) d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5)) x.clear_gradient() y.backward(paddle.ones_like(x), retain_graph=True) d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))
import torch
from torch import nn
from d2l import torch as d2l

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs, num_outputs, num_hiddens = 784, 10, 256

W1 = nn.Parameter(torch.randn(
    num_inputs, num_hiddens, requires_grad=True) * 0.01)
b1 = nn.Parameter(torch.zeros(num_hiddens, requires_grad=True))
W2 = nn.Parameter(torch.randn(
    num_hiddens, num_outputs, requires_grad=True) * 0.01)
b2 = nn.Parameter(torch.zeros(num_outputs, requires_grad=True))
params = [W1, b1, W2, b2]

def relu(X):
    a = torch.zeros_like(X)
    return torch.max(X, a)

def net(X):
    X = X.reshape((-1, num_inputs))
    H = relu(X @ W1 + b1)
    return H @ W2 + b2

loss = nn.CrossEntropyLoss(reduction='none')

num_epochs, lr = 10, 0.1
updater = torch.optim.SGD(params, lr=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs, num_outputs, num_hiddens = 784, 10, 256

W1 = paddle.randn([num_inputs, num_hiddens]) * 0.01
W1.stop_gradient = False
b1 = paddle.zeros([num_hiddens])
b1.stop_gradient = False
W2 = paddle.randn([num_hiddens, num_outputs]) * 0.01
W2.stop_gradient = False
b2 = paddle.zeros([num_outputs])
b2.stop_gradient = False
params = [W1, b1, W2, b2]

def relu(X):
    a = paddle.zeros_like(X)
    return paddle.maximum(X, a)

def net(X):
    X = X.reshape((-1, num_inputs))
    H = relu(X @ W1 + b1)
    return H @ W2 + b2

loss = nn.CrossEntropyLoss(reduction='none')

num_epochs, lr = 10, 0.1
updater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
import torch from torch import nn from d2l import torch as d2l net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10)) def init_weights(m): if type(m) == nn.Linear: nn.init.normal_(m.weight, std=0.01) net.apply(init_weights); batch_size, lr, num_epochs = 256, 0.1, 10 loss = nn.CrossEntropyLoss(reduction='none') trainer = torch.optim.SGD(net.parameters(), lr=lr) train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn

net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(),
                    nn.Linear(256, 10))

for layer in net:
    if type(layer) == nn.Linear:
        # Apply the initializer directly to the parameter; assigning a
        # ParamAttr after construction has no effect.
        paddle.nn.initializer.Normal(mean=0.0, std=0.01)(layer.weight)

batch_size, lr, num_epochs = 256, 0.1, 10
loss = nn.CrossEntropyLoss(reduction='none')
trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr)
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
import math
import numpy as np
import torch
from torch import nn
from d2l import torch as d2l

max_degree = 20                  # maximum degree of the polynomial
n_train, n_test = 100, 100       # training and test dataset sizes
true_w = np.zeros(max_degree)
true_w[0:4] = np.array([5, 1.2, -3.4, 5.6])

features = np.random.normal(size=(n_train + n_test, 1))
np.random.shuffle(features)
poly_features = np.power(features, np.arange(max_degree).reshape(1, -1))
for i in range(max_degree):
    poly_features[:, i] /= math.gamma(i + 1)  # gamma(n) = (n-1)!
labels = np.dot(poly_features, true_w)
labels += np.random.normal(scale=0.1, size=labels.shape)

true_w, features, poly_features, labels = [
    torch.tensor(x, dtype=torch.float32)
    for x in [true_w, features, poly_features, labels]]
features[:2], poly_features[:2, :], labels[:2]

def train(train_features, test_features, train_labels, test_labels,
          num_epochs=400):
    loss = nn.MSELoss(reduction='none')
    input_shape = train_features.shape[-1]
    net = nn.Sequential(nn.Linear(input_shape, 1, bias=False))
    batch_size = min(10, train_labels.shape[0])
    train_iter = d2l.load_array((train_features, train_labels.reshape(-1, 1)),
                                batch_size)
    test_iter = d2l.load_array((test_features, test_labels.reshape(-1, 1)),
                               batch_size, is_train=False)
    trainer = torch.optim.SGD(net.parameters(), lr=0.01)
    animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log',
                            xlim=[1, num_epochs], ylim=[1e-3, 1e2],
                            legend=['train', 'test'])
    for epoch in range(num_epochs):
        d2l.train_epoch_ch3(net, train_iter, loss, trainer)
        if epoch == 0 or (epoch + 1) % 20 == 0:
            animator.add(epoch + 1,
                         (d2l.evaluate_loss(net, train_iter, loss),
                          d2l.evaluate_loss(net, test_iter, loss)))

# Normal fit: first four polynomial features match the true model
train(poly_features[:n_train, :4], poly_features[n_train:, :4],
      labels[:n_train], labels[n_train:])
# Underfitting: linear features only
train(poly_features[:n_train, :2], poly_features[n_train:, :2],
      labels[:n_train], labels[n_train:])
# Overfitting: all degrees, long training
train(poly_features[:n_train, :], poly_features[n_train:, :],
      labels[:n_train], labels[n_train:], num_epochs=1500)
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import math
import numpy as np
import paddle
from paddle import nn

max_degree = 20
n_train, n_test = 100, 100
true_w = np.zeros(max_degree)
true_w[0:4] = np.array([5, 1.2, -3.4, 5.6])

features = np.random.normal(size=(n_train + n_test, 1))
np.random.shuffle(features)
poly_features = np.power(features, np.arange(max_degree).reshape(1, -1))
for i in range(max_degree):
    poly_features[:, i] /= math.gamma(i + 1)
labels = np.dot(poly_features, true_w)
labels += np.random.normal(scale=0.1, size=labels.shape)

true_w, features, poly_features, labels = [
    paddle.to_tensor(x, dtype=paddle.float32)
    for x in [true_w, features, poly_features, labels]]
features[:2], poly_features[:2, :], labels[:2]

def train(train_features, test_features, train_labels, test_labels,
          num_epochs=400):
    loss = nn.MSELoss()
    input_shape = train_features.shape[-1]
    net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))
    batch_size = min(10, train_labels.shape[0])
    train_iter = d2l.load_array((train_features, train_labels.reshape([-1, 1])),
                                batch_size)
    test_iter = d2l.load_array((test_features, test_labels.reshape([-1, 1])),
                               batch_size, is_train=False)
    trainer = paddle.optimizer.SGD(parameters=net.parameters(),
                                   learning_rate=0.01)
    animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log',
                            xlim=[1, num_epochs], ylim=[1e-3, 1e2],
                            legend=['train', 'test'])
    for epoch in range(num_epochs):
        d2l.train_epoch_ch3(net, train_iter, loss, trainer)
        if epoch == 0 or (epoch + 1) % 20 == 0:
            # evaluate_loss is the helper shipped with the d2l package
            animator.add(epoch + 1,
                         (d2l.evaluate_loss(net, train_iter, loss),
                          d2l.evaluate_loss(net, test_iter, loss)))

train(poly_features[:n_train, :4], poly_features[n_train:, :4],
      labels[:n_train], labels[n_train:])
train(poly_features[:n_train, :2], poly_features[n_train:, :2],
      labels[:n_train], labels[n_train:])
train(poly_features[:n_train, :], poly_features[n_train:, :],
      labels[:n_train], labels[n_train:], num_epochs=1500)
%matplotlib inline import torch from torch import nn from d2l import torch as d2l n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5 true_w, true_b = torch.ones((num_inputs, 1)) * 0.01, 0.05 train_data = d2l.synthetic_data(true_w, true_b, n_train) train_iter = d2l.load_array(train_data, batch_size) test_data = d2l.synthetic_data(true_w, true_b, n_test) test_iter = d2l.load_array(test_data, batch_size, is_train=False) def init_params(): w = torch.normal(0, 1, size=(num_inputs, 1), requires_grad=True) b = torch.zeros(1, requires_grad=True) return [w, b] def l2_penalty(w): return torch.sum(w.pow(2)) / 2 def train(lambd): w, b = init_params() net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss num_epochs, lr = 100, 0.003 animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: l = loss(net(X), y) + lambd * l2_penalty(w) l.sum().backward() d2l.sgd([w, b], lr, batch_size) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) def train_concise(wd): net = nn.Sequential(nn.Linear(num_inputs, 1)) for param in net.parameters(): param.data.normal_() loss = nn.MSELoss(reduction='none') num_epochs, lr = 100, 0.003 trainer = torch.optim.SGD([{"params":net[0].weight,'weight_decay': wd}, {"params":net[0].bias}], lr=lr) animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: trainer.zero_grad() l = loss(net(X), y) l.mean().backward() trainer.step() if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))
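The two training functions are meant to be called with and without regularization; the book runs:

train(lambd=0)      # no weight decay: training loss collapses, test loss stays high
train(lambd=3)      # weight decay shrinks the generalization gap
train_concise(0)
train_concise(3)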
null
%matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn

n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5
true_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05
train_data = d2l.synthetic_data(true_w, true_b, n_train)
train_iter = d2l.load_array(train_data, batch_size)
test_data = d2l.synthetic_data(true_w, true_b, n_test)
test_iter = d2l.load_array(test_data, batch_size, is_train=False)

def init_params():
    w = paddle.normal(0, 1, shape=(num_inputs, 1))
    w.stop_gradient = False
    b = paddle.zeros(shape=[1])
    b.stop_gradient = False
    return [w, b]

def l2_penalty(w):
    return paddle.sum(w.pow(2)) / 2

def train(lambd):
    w, b = init_params()
    net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss
    num_epochs, lr = 100, 0.003
    animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log',
                            xlim=[5, num_epochs], legend=['train', 'test'])
    for epoch in range(num_epochs):
        for X, y in train_iter:
            l = loss(net(X), y) + lambd * l2_penalty(w)
            l.sum().backward()
            d2l.sgd([w, b], lr, batch_size)
        if (epoch + 1) % 5 == 0:
            animator.add(epoch + 1,
                         (d2l.evaluate_loss(net, train_iter, loss),
                          d2l.evaluate_loss(net, test_iter, loss)))

def train_concise(wd):
    weight_attr = paddle.framework.ParamAttr(
        initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))
    bias_attr = paddle.framework.ParamAttr(
        initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))
    net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr,
                                  bias_attr=bias_attr))
    loss = nn.MSELoss()
    num_epochs, lr = 100, 0.003
    trainer = paddle.optimizer.SGD(parameters=net[0].parameters(),
                                   learning_rate=lr, weight_decay=wd * 1.0)
    animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log',
                            xlim=[5, num_epochs], legend=['train', 'test'])
    for epoch in range(num_epochs):
        for X, y in train_iter:
            l = loss(net(X), y)
            l.backward()
            trainer.step()
            trainer.clear_grad()
        if (epoch + 1) % 5 == 0:
            animator.add(epoch + 1,
                         (d2l.evaluate_loss(net, train_iter, loss),
                          d2l.evaluate_loss(net, test_iter, loss)))
import torch
from torch import nn
from d2l import torch as d2l

def dropout_layer(X, dropout):
    assert 0 <= dropout <= 1
    if dropout == 1:
        return torch.zeros_like(X)
    if dropout == 0:
        return X
    mask = (torch.rand(X.shape) > dropout).float()
    return mask * X / (1.0 - dropout)

X = torch.arange(16, dtype=torch.float32).reshape((2, 8))
dropout1, dropout2 = 0.2, 0.5
# Restored from the book's context: the Fashion-MNIST MLP dimensions.
num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256

class Net(nn.Module):
    def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,
                 is_training=True):
        super(Net, self).__init__()
        self.num_inputs = num_inputs
        self.training = is_training
        self.lin1 = nn.Linear(num_inputs, num_hiddens1)
        self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)
        self.lin3 = nn.Linear(num_hiddens2, num_outputs)
        self.relu = nn.ReLU()

    def forward(self, X):
        H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))
        if self.training == True:
            H1 = dropout_layer(H1, dropout1)
        H2 = self.relu(self.lin2(H1))
        if self.training == True:
            H2 = dropout_layer(H2, dropout2)
        out = self.lin3(H2)
        return out

net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)
num_epochs, lr, batch_size = 10, 0.5, 256
loss = nn.CrossEntropyLoss(reduction='none')
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
trainer = torch.optim.SGD(net.parameters(), lr=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)

net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(),
                    nn.Dropout(dropout1), nn.Linear(256, 256), nn.ReLU(),
                    nn.Dropout(dropout2), nn.Linear(256, 10))

def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)

net.apply(init_weights)
trainer = torch.optim.SGD(net.parameters(), lr=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
import warnings
warnings.filterwarnings(action='ignore')
import random
import paddle
from paddle import nn
warnings.filterwarnings("ignore", category=DeprecationWarning)
from d2l import paddle as d2l

def dropout_layer(X, dropout):
    assert 0 <= dropout <= 1
    if dropout == 1:
        return paddle.zeros_like(X)
    if dropout == 0:
        return X
    mask = (paddle.uniform(X.shape) > dropout).astype('float32')
    return mask * X / (1.0 - dropout)

X = paddle.arange(16, dtype=paddle.float32).reshape((2, 8))
dropout1, dropout2 = 0.2, 0.5
# Restored from the book's context: the Fashion-MNIST MLP dimensions.
num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256

class Net(nn.Layer):
    def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,
                 is_training=True):
        super(Net, self).__init__()
        self.num_inputs = num_inputs
        self.training = is_training
        self.lin1 = nn.Linear(num_inputs, num_hiddens1)
        self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)
        self.lin3 = nn.Linear(num_hiddens2, num_outputs)
        self.relu = nn.ReLU()

    def forward(self, X):
        H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))
        if self.training == True:
            H1 = dropout_layer(H1, dropout1)
        H2 = self.relu(self.lin2(H1))
        if self.training == True:
            H2 = dropout_layer(H2, dropout2)
        out = self.lin3(H2)
        return out

net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)
num_epochs, lr, batch_size = 10, 0.5, 256
loss = nn.CrossEntropyLoss(reduction='none')
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
trainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)

weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))
net = nn.Sequential(nn.Flatten(),
                    nn.Linear(784, 256, weight_attr=weight_attr), nn.ReLU(),
                    nn.Dropout(dropout1),
                    nn.Linear(256, 256, weight_attr=weight_attr), nn.ReLU(),
                    nn.Dropout(dropout2),
                    nn.Linear(256, 10, weight_attr=weight_attr))
trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
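As a quick sanity check on dropout_layer (mirroring the book's demo; not part of this excerpt), the extreme settings behave as expected:

print(dropout_layer(X, 0.))    # identity: nothing is dropped
print(dropout_layer(X, 0.5))   # about half the entries zeroed, rest scaled by 2
print(dropout_layer(X, 1.))    # all zeros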
%matplotlib inline
import torch
from d2l import torch as d2l

x = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)
y = torch.sigmoid(x)
y.backward(torch.ones_like(x))
d2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],
         legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))

M = torch.normal(0, 1, size=(4, 4))
for i in range(100):
    M = torch.mm(M, torch.normal(0, 1, size=(4, 4)))
%matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle

x = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32')
x.stop_gradient = False
y = paddle.nn.functional.sigmoid(x)
y.backward(paddle.ones_like(x))
d2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],
         legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))

M = paddle.normal(0, 1, shape=(4, 4))
for i in range(100):
    M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4)))
%matplotlib inline
import numpy as np
import pandas as pd
import torch
from torch import nn
from d2l import torch as d2l

n_train = train_data.shape[0]
train_features = torch.tensor(all_features[:n_train].values,
                              dtype=torch.float32)
test_features = torch.tensor(all_features[n_train:].values,
                             dtype=torch.float32)
train_labels = torch.tensor(train_data.SalePrice.values.reshape(-1, 1),
                            dtype=torch.float32)

loss = nn.MSELoss()  # restored from the book's context; used by log_rmse below

def log_rmse(net, features, labels):
    # Clamp predictions to [1, inf) so that log() stays finite.
    clipped_preds = torch.clamp(net(features), 1, float('inf'))
    rmse = torch.sqrt(loss(torch.log(clipped_preds), torch.log(labels)))
    return rmse.item()

def train(net, train_features, train_labels, test_features, test_labels,
          num_epochs, learning_rate, weight_decay, batch_size):
    train_ls, test_ls = [], []
    train_iter = d2l.load_array((train_features, train_labels), batch_size)
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate,
                                 weight_decay=weight_decay)
    for epoch in range(num_epochs):
        for X, y in train_iter:
            optimizer.zero_grad()
            l = loss(net(X), y)
            l.backward()
            optimizer.step()
        train_ls.append(log_rmse(net, train_features, train_labels))
        if test_labels is not None:
            test_ls.append(log_rmse(net, test_features, test_labels))
    return train_ls, test_ls

def get_k_fold_data(k, i, X, y):
    assert k > 1
    fold_size = X.shape[0] // k
    X_train, y_train = None, None
    for j in range(k):
        idx = slice(j * fold_size, (j + 1) * fold_size)
        X_part, y_part = X[idx, :], y[idx]
        if j == i:
            X_valid, y_valid = X_part, y_part
        elif X_train is None:
            X_train, y_train = X_part, y_part
        else:
            X_train = torch.cat([X_train, X_part], 0)
            y_train = torch.cat([y_train, y_part], 0)
    return X_train, y_train, X_valid, y_valid
%matplotlib inline
import warnings
import numpy as np
import pandas as pd
warnings.filterwarnings(action='ignore')
import paddle
from paddle import nn
warnings.filterwarnings("ignore", category=DeprecationWarning)
from d2l import paddle as d2l

n_train = train_data.shape[0]
train_features = paddle.to_tensor(all_features[:n_train].values,
                                  dtype=paddle.float32)
test_features = paddle.to_tensor(all_features[n_train:].values,
                                 dtype=paddle.float32)
train_labels = paddle.to_tensor(train_data.SalePrice.values.reshape(-1, 1),
                                dtype=paddle.float32)

loss = nn.MSELoss()  # restored from the book's context; used by log_rmse below

def log_rmse(net, features, labels):
    # Clip predictions to [1, inf) so that log() stays finite.
    clipped_preds = paddle.clip(net(features), 1, float('inf'))
    rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))
    return rmse.item()

def train(net, train_features, train_labels, test_features, test_labels,
          num_epochs, learning_rate, weight_decay, batch_size):
    train_ls, test_ls = [], []
    train_iter = d2l.load_array((train_features, train_labels), batch_size)
    optimizer = paddle.optimizer.Adam(learning_rate=learning_rate * 1.0,
                                      parameters=net.parameters(),
                                      weight_decay=weight_decay * 1.0)
    for epoch in range(num_epochs):
        for X, y in train_iter:
            l = loss(net(X), y)
            l.backward()
            optimizer.step()
            optimizer.clear_grad()
        train_ls.append(log_rmse(net, train_features, train_labels))
        if test_labels is not None:
            test_ls.append(log_rmse(net, test_features, test_labels))
    return train_ls, test_ls

def get_k_fold_data(k, i, X, y):
    assert k > 1
    fold_size = X.shape[0] // k
    X_train, y_train = None, None
    for j in range(k):
        idx = slice(j * fold_size, (j + 1) * fold_size)
        X_part, y_part = X[idx, :], y[idx]
        if j == i:
            X_valid, y_valid = X_part, y_part
        elif X_train is None:
            X_train, y_train = X_part, y_part
        else:
            X_train = paddle.concat([X_train, X_part], 0)
            y_train = paddle.concat([y_train, y_part], 0)
    return X_train, y_train, X_valid, y_valid
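Both versions define get_k_fold_data but the excerpt omits the loop that drives it. A minimal sketch, assuming a get_net() model factory like the book's:

def k_fold(k, X_train, y_train, num_epochs, learning_rate, weight_decay,
           batch_size):
    train_l_sum, valid_l_sum = 0.0, 0.0
    for i in range(k):
        data = get_k_fold_data(k, i, X_train, y_train)
        net = get_net()                      # assumed model factory
        train_ls, valid_ls = train(net, *data, num_epochs, learning_rate,
                                   weight_decay, batch_size)
        train_l_sum += train_ls[-1]          # log-RMSE of the last epoch
        valid_l_sum += valid_ls[-1]
    return train_l_sum / k, valid_l_sum / k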
import torch
from torch import nn
from torch.nn import functional as F

net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
X = torch.rand(2, 20)
net(X)

class MLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(20, 256)
        self.out = nn.Linear(256, 10)

    def forward(self, X):
        return self.out(F.relu(self.hidden(X)))

class MySequential(nn.Module):
    def __init__(self, *args):
        super().__init__()
        for idx, module in enumerate(args):
            self._modules[str(idx)] = module

    def forward(self, X):
        for block in self._modules.values():
            X = block(X)
        return X

class FixedHiddenMLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        self.linear = nn.Linear(20, 20)

    def forward(self, X):
        X = self.linear(X)
        X = F.relu(torch.mm(X, self.rand_weight) + 1)
        X = self.linear(X)
        while X.abs().sum() > 1:
            X /= 2
        return X.sum()

class NestMLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),
                                 nn.Linear(64, 32), nn.ReLU())
        self.linear = nn.Linear(32, 16)

    def forward(self, X):
        return self.linear(self.net(X))

chimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())
chimera(X)
import warnings
warnings.filterwarnings(action='ignore')
import paddle
from paddle import nn
from paddle.nn import functional as F

net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
X = paddle.rand([2, 20])
net(X)

class MLP(nn.Layer):
    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(20, 256)
        self.out = nn.Linear(256, 10)

    def forward(self, X):
        return self.out(F.relu(self.hidden(X)))

class MySequential(nn.Layer):
    def __init__(self, *layers):
        super(MySequential, self).__init__()
        if len(layers) > 0 and isinstance(layers[0], tuple):
            for name, layer in layers:
                self.add_sublayer(name, layer)
        else:
            for idx, layer in enumerate(layers):
                self.add_sublayer(str(idx), layer)

    def forward(self, X):
        for layer in self._sub_layers.values():
            X = layer(X)
        return X

class FixedHiddenMLP(nn.Layer):
    def __init__(self):
        super().__init__()
        self.rand_weight = paddle.rand([20, 20])
        self.linear = nn.Linear(20, 20)

    def forward(self, X):
        X = self.linear(X)
        X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)
        X = self.linear(X)
        while X.abs().sum() > 1:
            X /= 2
        return X.sum()

class NestMLP(nn.Layer):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),
                                 nn.Linear(64, 32), nn.ReLU())
        self.linear = nn.Linear(32, 16)

    def forward(self, X):
        return self.linear(self.net(X))

chimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())
chimera(X)
import torch
from torch import nn

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
X = torch.rand(size=(2, 4))
net(X)
net.state_dict()['2.bias'].data

def block1():
    return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4),
                         nn.ReLU())

def block2():
    net = nn.Sequential()
    for i in range(4):
        net.add_module(f'block {i}', block1())
    return net

rgnet = nn.Sequential(block2(), nn.Linear(4, 1))
rgnet(X)

def init_normal(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, mean=0, std=0.01)
        nn.init.zeros_(m.bias)

net.apply(init_normal)
net[0].weight.data[0], net[0].bias.data[0]

def init_constant(m):
    if type(m) == nn.Linear:
        nn.init.constant_(m.weight, 1)
        nn.init.zeros_(m.bias)

net.apply(init_constant)
net[0].weight.data[0], net[0].bias.data[0]

def init_xavier(m):
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)

def init_42(m):
    if type(m) == nn.Linear:
        nn.init.constant_(m.weight, 42)

net[0].apply(init_xavier)
net[2].apply(init_42)

def my_init(m):
    if type(m) == nn.Linear:
        nn.init.uniform_(m.weight, -10, 10)
        m.weight.data *= m.weight.data.abs() >= 5

net.apply(my_init)
net[0].weight[:2]
net[0].weight.data[:] += 1
net[0].weight.data[0, 0] = 42
net[0].weight.data[0]

layer = CenteredLayer()
layer(torch.FloatTensor([1, 2, 3, 4, 5]))
import warnings
warnings.filterwarnings(action='ignore')
import paddle
from paddle import nn

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
X = paddle.rand([2, 4])
net(X)
net.state_dict()['2.bias']

def block1():
    return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4),
                         nn.ReLU())

def block2():
    net = nn.Sequential()
    for i in range(4):
        net.add_sublayer(f'block {i}', block1())
    return net

rgnet = nn.Sequential(block2(), nn.Linear(4, 1))
rgnet(X)

def init_normal(m):
    if type(m) == nn.Linear:
        # overwrite the parameters in place via set_value
        m.weight.set_value(paddle.normal(mean=0.0, std=0.01,
                                         shape=m.weight.shape))
        m.bias.set_value(paddle.zeros_like(m.bias))

net.apply(init_normal)
net[0].weight[0], net[0].state_dict()['bias']

def init_constant(m):
    if type(m) == nn.Linear:
        m.weight.set_value(paddle.ones_like(m.weight))
        m.bias.set_value(paddle.zeros_like(m.bias))

net.apply(init_constant)
net[0].weight[0], net[0].state_dict()['bias']

def xavier(m):
    if type(m) == nn.Linear:
        # initializer instances can be applied to a parameter directly
        paddle.nn.initializer.XavierUniform()(m.weight)

def init_42(m):
    if type(m) == nn.Linear:
        m.weight.set_value(paddle.full_like(m.weight, 42.0))

net[0].apply(xavier)
net[2].apply(init_42)

def my_init(m):
    if type(m) == nn.Linear:
        print('Init', *[(name, param.shape)
                        for name, param in m.named_parameters()][0])
        m.weight.set_value(paddle.uniform(m.weight.shape, min=-10, max=10))
        # keep only entries with |w| >= 5, zero out the rest
        h = (paddle.abs(m.weight) >= 5).astype(m.weight.dtype)
        m.weight.set_value(m.weight * h)

net.apply(my_init)
net[0].weight[:2]
net[0].weight.set_value(net[0].weight.numpy() + 1)
val = net[0].weight.numpy()
val[0, 0] = 42
net[0].weight.set_value(val)
net[0].weight[0]

layer = CenteredLayer()
layer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))
import torch
import torch.nn.functional as F
from torch import nn

class CenteredLayer(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, X):
        return X - X.mean()

# Restored from the book's context: a network containing the custom layer.
net = nn.Sequential(nn.Linear(8, 128), CenteredLayer())
Y = net(torch.rand(4, 8))
Y.mean()

class MyLinear(nn.Module):
    def __init__(self, in_units, units):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(in_units, units))
        self.bias = nn.Parameter(torch.randn(units,))

    def forward(self, X):
        linear = torch.matmul(X, self.weight.data) + self.bias.data
        return F.relu(linear)

linear = MyLinear(5, 3)  # restored from the book's context
linear(torch.rand(2, 5))
net = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))
net(torch.rand(2, 64))
import warnings
warnings.filterwarnings(action='ignore')
import paddle
import paddle.nn.functional as F
from paddle import nn

class CenteredLayer(nn.Layer):
    def __init__(self):
        super().__init__()

    def forward(self, X):
        return X - X.mean()

# Restored from the book's context: a network containing the custom layer.
net = nn.Sequential(nn.Linear(8, 128), CenteredLayer())
Y = net(paddle.rand([4, 8]))
Y.mean()

class MyLinear(nn.Layer):
    def __init__(self, in_units, units):
        super().__init__()
        self.weight = paddle.create_parameter(shape=(in_units, units),
                                              dtype='float32')
        self.bias = paddle.create_parameter(shape=(units,), dtype='float32')

    def forward(self, X):
        linear = paddle.matmul(X, self.weight) + self.bias
        return F.relu(linear)

linear = MyLinear(5, 3)  # restored from the book's context
linear(paddle.randn([2, 5]))
net = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))
net(paddle.rand([2, 64]))
import torch
from torch import nn
from torch.nn import functional as F

x = torch.arange(4)
torch.save(x, 'x-file')
x2 = torch.load('x-file')
y = torch.zeros(4)
torch.save([x, y], 'x-files')
x2, y2 = torch.load('x-files')
mydict = {'x': x, 'y': y}
torch.save(mydict, 'mydict')
mydict2 = torch.load('mydict')

class MLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(20, 256)
        self.output = nn.Linear(256, 10)

    def forward(self, x):
        return self.output(F.relu(self.hidden(x)))

net = MLP()
X = torch.randn(size=(2, 20))
Y = net(X)
torch.save(net.state_dict(), 'mlp.params')
clone = MLP()
clone.load_state_dict(torch.load('mlp.params'))
clone.eval()
import warnings
warnings.filterwarnings(action='ignore')
import paddle
from paddle import nn
from paddle.nn import functional as F

x = paddle.arange(4)
paddle.save(x, 'x-file')
x2 = paddle.load('x-file')
y = paddle.zeros([4])
paddle.save([x, y], 'x-file')
x2, y2 = paddle.load('x-file')
mydict = {'x': x, 'y': y}
paddle.save(mydict, 'mydict')
mydict2 = paddle.load('mydict')

class MLP(nn.Layer):
    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(20, 256)
        self.output = nn.Linear(256, 10)

    def forward(self, x):
        return self.output(F.relu(self.hidden(x)))

net = MLP()
X = paddle.randn(shape=[2, 20])
Y = net(X)
paddle.save(net.state_dict(), 'mlp.pdparams')
clone = MLP()
clone.set_state_dict(paddle.load('mlp.pdparams'))
clone.eval()
import torch
from torch import nn

torch.device('cpu'), torch.device('cuda'), torch.device('cuda:1')
torch.cuda.device_count()

def try_gpu(i=0):
    if torch.cuda.device_count() >= i + 1:
        return torch.device(f'cuda:{i}')
    return torch.device('cpu')

def try_all_gpus():
    devices = [torch.device(f'cuda:{i}')
               for i in range(torch.cuda.device_count())]
    return devices if devices else [torch.device('cpu')]

try_gpu(), try_gpu(10), try_all_gpus()
x = torch.tensor([1, 2, 3])
x.device
X = torch.ones(2, 3, device=try_gpu())
Y = torch.rand(2, 3, device=try_gpu(1))
net = nn.Sequential(nn.Linear(3, 1))
net = net.to(device=try_gpu())
net[0].weight.data.device
import paddle
from paddle import nn

paddle.device.set_device("cpu"), paddle.CUDAPlace(0), paddle.CUDAPlace(1)
paddle.device.cuda.device_count()

def try_gpu(i=0):
    if paddle.device.cuda.device_count() >= i + 1:
        return paddle.CUDAPlace(i)
    return paddle.CPUPlace()

def try_all_gpus():
    devices = [paddle.CUDAPlace(i)
               for i in range(paddle.device.cuda.device_count())]
    return devices if devices else [paddle.CPUPlace()]

try_gpu(), try_gpu(10), try_all_gpus()
x = paddle.to_tensor([1, 2, 3])
x.place
X = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu())
Y = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1))
net = nn.Sequential(nn.Linear(3, 1))
net = net.to(try_gpu())
net[0].weight.place
import torch
from torch import nn
from d2l import torch as d2l

def corr2d(X, K):
    h, w = K.shape
    Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
    return Y

X = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
K = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
corr2d(X, K)

class Conv2D(nn.Module):
    def __init__(self, kernel_size):
        super().__init__()
        self.weight = nn.Parameter(torch.rand(kernel_size))
        self.bias = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        return corr2d(x, self.weight) + self.bias

X = torch.ones((6, 8))
X[:, 2:6] = 0
K = torch.tensor([[1.0, -1.0]])
Y = corr2d(X, K)  # restored from the book's context: the training target

conv2d = nn.Conv2d(1, 1, kernel_size=(1, 2), bias=False)
X = X.reshape((1, 1, 6, 8))
Y = Y.reshape((1, 1, 6, 7))
lr = 3e-2
for i in range(10):
    Y_hat = conv2d(X)
    l = (Y_hat - Y) ** 2
    conv2d.zero_grad()
    l.sum().backward()
    conv2d.weight.data[:] -= lr * conv2d.weight.grad

conv2d.weight.data.reshape((1, 2))
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn

def corr2d(X, K):
    h, w = K.shape
    Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
    return Y

X = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
K = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])
corr2d(X, K)

class Conv2D(nn.Layer):
    def __init__(self, kernel_size):
        super().__init__()
        # register trainable parameters (a bare ParamAttr does not create them)
        self.weight = self.create_parameter(shape=kernel_size, dtype='float32')
        self.bias = self.create_parameter(shape=[1], dtype='float32',
                                          is_bias=True)

    def forward(self, x):
        return corr2d(x, self.weight) + self.bias

X = paddle.ones((6, 8))
X[:, 2:6] = 0
K = paddle.to_tensor([[1.0, -1.0]])
Y = corr2d(X, K)  # restored from the book's context: the training target

conv2d = nn.Conv2D(1, 1, kernel_size=(1, 2))
X = X.reshape((1, 1, 6, 8))
Y = Y.reshape((1, 1, 6, 7))
lr = 3e-2
for i in range(10):
    Y_hat = conv2d(X)
    l = (Y_hat - Y) ** 2
    conv2d.clear_gradients()
    l.sum().backward()
    with paddle.no_grad():
        conv2d.weight[:] -= lr * conv2d.weight.grad

conv2d.weight.reshape((1, 2))
import torch
from torch import nn

def comp_conv2d(conv2d, X):
    X = X.reshape((1, 1) + X.shape)
    Y = conv2d(X)
    return Y.reshape(Y.shape[2:])

conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1)
X = torch.rand(size=(8, 8))
comp_conv2d(conv2d, X).shape
conv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1))
comp_conv2d(conv2d, X).shape
conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2)
comp_conv2d(conv2d, X).shape
conv2d = nn.Conv2d(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))
comp_conv2d(conv2d, X).shape
import warnings
warnings.filterwarnings(action='ignore')
import paddle
from paddle import nn

def comp_conv2d(conv2d, X):
    X = paddle.reshape(X, [1, 1] + X.shape)
    Y = conv2d(X)
    return Y.reshape(Y.shape[2:])

conv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1)
X = paddle.rand((8, 8))
comp_conv2d(conv2d, X).shape
conv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3),
                   padding=(2, 1))
comp_conv2d(conv2d, X).shape
conv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, stride=2)
comp_conv2d(conv2d, X).shape
conv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))
comp_conv2d(conv2d, X).shape
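The shapes these examples print follow the usual convolution arithmetic: each spatial dimension maps to floor((n - k + p + s) / s), where p is the total padding on that axis. A quick check of the examples above:

def conv_out(n, k, p, s):
    # floor((n - k + p + s) / s); p is the *total* padding on that axis
    return (n - k + p + s) // s

print(conv_out(8, 3, 2, 1))                        # 8: kernel 3, padding 1
print(conv_out(8, 3, 2, 2))                        # 4: adding stride 2
print(conv_out(8, 3, 0, 3), conv_out(8, 5, 2, 4))  # (2, 2): last example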
import torch
from d2l import torch as d2l

def corr2d_multi_in(X, K):
    return sum(d2l.corr2d(x, k) for x, k in zip(X, K))

X = torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]],
                  [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])
K = torch.tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])
corr2d_multi_in(X, K)

def corr2d_multi_in_out(X, K):
    return torch.stack([corr2d_multi_in(X, k) for k in K], 0)

K = torch.stack((K, K + 1, K + 2), 0)
K.shape

def corr2d_multi_in_out_1x1(X, K):
    c_i, h, w = X.shape
    c_o = K.shape[0]
    X = X.reshape((c_i, h * w))
    K = K.reshape((c_o, c_i))
    Y = torch.matmul(K, X)
    return Y.reshape((c_o, h, w))

X = torch.normal(0, 1, (3, 3, 3))
K = torch.normal(0, 1, (2, 3, 1, 1))
Y1 = corr2d_multi_in_out_1x1(X, K)
Y2 = corr2d_multi_in_out(X, K)
assert float(torch.abs(Y1 - Y2).sum()) < 1e-6
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle

def corr2d_multi_in(X, K):
    return sum(d2l.corr2d(x, k) for x, k in zip(X, K))

X = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]],
                      [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])
K = paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])
corr2d_multi_in(X, K)

def corr2d_multi_in_out(X, K):
    return paddle.stack([corr2d_multi_in(X, k) for k in K], 0)

K = paddle.stack((K, K + 1, K + 2), 0)
K.shape

def corr2d_multi_in_out_1x1(X, K):
    c_i, h, w = X.shape
    c_o = K.shape[0]
    X = X.reshape((c_i, h * w))
    K = K.reshape((c_o, c_i))
    Y = paddle.matmul(K, X)
    return Y.reshape((c_o, h, w))

X = paddle.normal(0, 1, (3, 3, 3))
K = paddle.normal(0, 1, (2, 3, 1, 1))
Y1 = corr2d_multi_in_out_1x1(X, K)
Y2 = corr2d_multi_in_out(X, K)
assert float(paddle.abs(Y1 - Y2).sum()) < 1e-6
import torch
from torch import nn
from d2l import torch as d2l

def pool2d(X, pool_size, mode='max'):
    p_h, p_w = pool_size
    Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            if mode == 'max':
                Y[i, j] = X[i: i + p_h, j: j + p_w].max()
            elif mode == 'avg':
                Y[i, j] = X[i: i + p_h, j: j + p_w].mean()
    return Y

X = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
pool2d(X, (2, 2))
X = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4))
pool2d = nn.MaxPool2d(3)
pool2d(X)
pool2d = nn.MaxPool2d(3, padding=1, stride=2)
pool2d(X)
pool2d = nn.MaxPool2d((2, 3), stride=(2, 3), padding=(0, 1))
pool2d(X)
X = torch.cat((X, X + 1), 1)
pool2d = nn.MaxPool2d(3, padding=1, stride=2)
pool2d(X)
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn

def pool2d(X, pool_size, mode='max'):
    p_h, p_w = pool_size
    Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            if mode == 'max':
                Y[i, j] = X[i: i + p_h, j: j + p_w].max()
            elif mode == 'avg':
                Y[i, j] = X[i: i + p_h, j: j + p_w].mean()
    return Y

X = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
pool2d(X, (2, 2))
X = paddle.arange(16, dtype="float32").reshape((1, 1, 4, 4))
pool2d = nn.MaxPool2D(3, stride=3)
pool2d(X)
pool2d = nn.MaxPool2D(3, padding=1, stride=2)
pool2d(X)
pool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3))
pool2d(X)
X = paddle.concat((X, X + 1), 1)
pool2d = paddle.nn.MaxPool2D(3, padding=1, stride=2)
pool2d(X)
import torch
from torch import nn
from d2l import torch as d2l

net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Flatten(),
    nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),
    nn.Linear(120, 84), nn.Sigmoid(),
    nn.Linear(84, 10))

X = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)

def train_ch6(net, train_iter, test_iter, num_epochs, lr, device):
    def init_weights(m):
        if type(m) == nn.Linear or type(m) == nn.Conv2d:
            nn.init.xavier_uniform_(m.weight)
    net.apply(init_weights)
    net.to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    loss = nn.CrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
                            legend=['train loss', 'train acc', 'test acc'])
    timer, num_batches = d2l.Timer(), len(train_iter)
    for epoch in range(num_epochs):
        metric = d2l.Accumulator(3)
        net.train()
        for i, (X, y) in enumerate(train_iter):
            timer.start()
            optimizer.zero_grad()
            X, y = X.to(device), y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)
            l.backward()
            optimizer.step()
            with torch.no_grad():
                metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])
            timer.stop()
            train_l = metric[0] / metric[2]
            train_acc = metric[1] / metric[2]
            if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (train_l, train_acc, None))
        test_acc = evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch + 1, (None, None, test_acc))
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn, optimizer

net = nn.Sequential(
    nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),
    nn.AvgPool2D(kernel_size=2, stride=2),
    nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(),
    nn.AvgPool2D(kernel_size=2, stride=2),
    nn.Flatten(),
    nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),
    nn.Linear(120, 84), nn.Sigmoid(),
    nn.Linear(84, 10))

X = paddle.rand((1, 1, 28, 28), 'float32')
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)

def train_ch6(net, train_iter, test_iter, num_epochs, lr, device):
    def init_weights(m):
        if type(m) == nn.Linear or type(m) == nn.Conv2D:
            # apply Xavier initialization to the weight in place
            nn.initializer.XavierUniform()(m.weight)
    net.apply(init_weights)
    net.to(device)
    optimizer = paddle.optimizer.SGD(learning_rate=lr,
                                     parameters=net.parameters())
    loss = nn.CrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
                            legend=['train loss', 'train acc', 'test acc'])
    timer, num_batches = d2l.Timer(), len(train_iter)
    for epoch in range(num_epochs):
        metric = d2l.Accumulator(3)
        net.train()
        for i, (X, y) in enumerate(train_iter):
            timer.start()
            optimizer.clear_grad()
            X = paddle.to_tensor(X, place=device)
            y = paddle.to_tensor(y, place=device)
            y_hat = net(X)
            l = loss(y_hat, y)
            l.backward()
            optimizer.step()
            with paddle.no_grad():
                metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])
            timer.stop()
            train_l = metric[0] / metric[2]
            train_acc = metric[1] / metric[2]
            if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (train_l, train_acc, None))
        test_acc = evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch + 1, (None, None, test_acc))
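train_ch6 calls an evaluate_accuracy_gpu helper that the excerpt omits; a PyTorch sketch mirroring the book's d2l.evaluate_accuracy_gpu:

def evaluate_accuracy_gpu(net, data_iter, device=None):
    if isinstance(net, nn.Module):
        net.eval()  # evaluation mode: disables dropout, fixes BN statistics
        if not device:
            device = next(iter(net.parameters())).device
    metric = d2l.Accumulator(2)  # (number correct, number of predictions)
    with torch.no_grad():
        for X, y in data_iter:
            X, y = X.to(device), y.to(device)
            metric.add(d2l.accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]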
import torch
from torch import nn
from d2l import torch as d2l

net = nn.Sequential(
    nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),
    nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),
    nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),
    nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),
    nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2), nn.Flatten(),
    nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),
    nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),
    nn.Linear(4096, 10))

X = torch.randn(1, 1, 224, 224)
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn as nn

net = nn.Sequential(
    nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),
    nn.MaxPool2D(kernel_size=3, stride=2),
    nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(),
    nn.MaxPool2D(kernel_size=3, stride=2),
    nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(),
    nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(),
    nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(),
    nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(),
    nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),
    nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),
    nn.Linear(4096, 10))

X = paddle.randn(shape=(1, 1, 224, 224))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)
import torch
from torch import nn
from d2l import torch as d2l

def vgg_block(num_convs, in_channels, out_channels):
    layers = []
    for _ in range(num_convs):
        layers.append(nn.Conv2d(in_channels, out_channels,
                                kernel_size=3, padding=1))
        layers.append(nn.ReLU())
        in_channels = out_channels
    layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
    return nn.Sequential(*layers)

# Restored from the book's context: the VGG-11 architecture specification.
conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))

def vgg(conv_arch):
    conv_blks = []
    in_channels = 1
    for (num_convs, out_channels) in conv_arch:
        conv_blks.append(vgg_block(num_convs, in_channels, out_channels))
        in_channels = out_channels
    return nn.Sequential(
        *conv_blks, nn.Flatten(),
        nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5),
        nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),
        nn.Linear(4096, 10))

net = vgg(conv_arch)
X = torch.randn(size=(1, 1, 224, 224))
for blk in net:
    X = blk(X)
    print(blk.__class__.__name__, 'output shape: ', X.shape)
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn as nn

def vgg_block(num_convs, in_channels, out_channels):
    layers = []
    for _ in range(num_convs):
        layers.append(nn.Conv2D(in_channels, out_channels,
                                kernel_size=3, padding=1))
        layers.append(nn.ReLU())
        in_channels = out_channels
    layers.append(nn.MaxPool2D(kernel_size=2, stride=2))
    return nn.Sequential(*layers)

# Restored from the book's context: the VGG-11 architecture specification.
conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))

def vgg(conv_arch):
    conv_blks = []
    in_channels = 1
    for (num_convs, out_channels) in conv_arch:
        conv_blks.append(vgg_block(num_convs, in_channels, out_channels))
        in_channels = out_channels
    return nn.Sequential(
        *conv_blks, nn.Flatten(),
        nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5),
        nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),
        nn.Linear(4096, 10))

net = vgg(conv_arch)
X = paddle.randn(shape=(1, 1, 224, 224))
for blk in net:
    X = blk(X)
    print(blk.__class__.__name__, 'output shape: ', X.shape)
import torch
from torch import nn
from d2l import torch as d2l

def nin_block(in_channels, out_channels, kernel_size, strides, padding):
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),
        nn.ReLU(),
        nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(),
        nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU())

net = nn.Sequential(
    nin_block(1, 96, kernel_size=11, strides=4, padding=0),
    nn.MaxPool2d(3, stride=2),
    nin_block(96, 256, kernel_size=5, strides=1, padding=2),
    nn.MaxPool2d(3, stride=2),
    nin_block(256, 384, kernel_size=3, strides=1, padding=1),
    nn.MaxPool2d(3, stride=2), nn.Dropout(0.5),
    nin_block(384, 10, kernel_size=3, strides=1, padding=1),
    nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten())

X = torch.rand(size=(1, 1, 224, 224))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn as nn

def nin_block(in_channels, out_channels, kernel_size, strides, padding):
    return nn.Sequential(
        nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding),
        nn.ReLU(),
        nn.Conv2D(out_channels, out_channels, kernel_size=1), nn.ReLU(),
        nn.Conv2D(out_channels, out_channels, kernel_size=1), nn.ReLU())

net = nn.Sequential(
    nin_block(1, 96, kernel_size=11, strides=4, padding=0),
    nn.MaxPool2D(3, stride=2),
    nin_block(96, 256, kernel_size=5, strides=1, padding=2),
    nn.MaxPool2D(3, stride=2),
    nin_block(256, 384, kernel_size=3, strides=1, padding=1),
    nn.MaxPool2D(3, stride=2), nn.Dropout(0.5),
    nin_block(384, 10, kernel_size=3, strides=1, padding=1),
    nn.AdaptiveAvgPool2D((1, 1)), nn.Flatten())

X = paddle.rand(shape=(1, 1, 224, 224))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l

class Inception(nn.Module):
    def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):
        super(Inception, self).__init__(**kwargs)
        self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)
        self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)
        self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)
        self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)
        self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)
        self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)

    def forward(self, x):
        p1 = F.relu(self.p1_1(x))
        p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))
        p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))
        p4 = F.relu(self.p4_2(self.p4_1(x)))
        return torch.cat((p1, p2, p3, p4), dim=1)

b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
                   nn.ReLU(),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1), nn.ReLU(),
                   nn.Conv2d(64, 192, kernel_size=3, padding=1), nn.ReLU(),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),
                   Inception(256, 128, (128, 192), (32, 96), 64),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),
                   Inception(512, 160, (112, 224), (24, 64), 64),
                   Inception(512, 128, (128, 256), (24, 64), 64),
                   Inception(512, 112, (144, 288), (32, 64), 64),
                   Inception(528, 256, (160, 320), (32, 128), 128),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),
                   Inception(832, 384, (192, 384), (48, 128), 128),
                   nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten())
net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))

X = torch.rand(size=(1, 1, 96, 96))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn as nn
import paddle.nn.functional as F

class Inception(nn.Layer):
    def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):
        super(Inception, self).__init__(**kwargs)
        self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1)
        self.p2_1 = nn.Conv2D(in_channels, c2[0], kernel_size=1)
        self.p2_2 = nn.Conv2D(c2[0], c2[1], kernel_size=3, padding=1)
        self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1)
        self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2)
        self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1)
        self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1)

    def forward(self, x):
        p1 = F.relu(self.p1_1(x))
        p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))
        p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))
        p4 = F.relu(self.p4_2(self.p4_1(x)))
        return paddle.concat(x=[p1, p2, p3, p4], axis=1)

b1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),
                   nn.ReLU(),
                   nn.MaxPool2D(kernel_size=3, stride=2, padding=1))
b2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1), nn.ReLU(),
                   nn.Conv2D(64, 192, kernel_size=3, padding=1), nn.ReLU(),
                   nn.MaxPool2D(kernel_size=3, stride=2, padding=1))
b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),
                   Inception(256, 128, (128, 192), (32, 96), 64),
                   nn.MaxPool2D(kernel_size=3, stride=2, padding=1))
b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),
                   Inception(512, 160, (112, 224), (24, 64), 64),
                   Inception(512, 128, (128, 256), (24, 64), 64),
                   Inception(512, 112, (144, 288), (32, 64), 64),
                   Inception(528, 256, (160, 320), (32, 128), 128),
                   nn.MaxPool2D(kernel_size=3, stride=2, padding=1))
b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),
                   Inception(832, 384, (192, 384), (48, 128), 128),
                   nn.AdaptiveAvgPool2D((1, 1)), nn.Flatten())
net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))

X = paddle.rand(shape=(1, 1, 96, 96))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)
import torch
from torch import nn
from d2l import torch as d2l

def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):
    if not torch.is_grad_enabled():
        X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)
    else:
        assert len(X.shape) in (2, 4)
        if len(X.shape) == 2:
            mean = X.mean(dim=0)
            var = ((X - mean) ** 2).mean(dim=0)
        else:
            mean = X.mean(dim=(0, 2, 3), keepdim=True)
            var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True)
        X_hat = (X - mean) / torch.sqrt(var + eps)
        moving_mean = momentum * moving_mean + (1.0 - momentum) * mean
        moving_var = momentum * moving_var + (1.0 - momentum) * var
    Y = gamma * X_hat + beta
    return Y, moving_mean.data, moving_var.data

class BatchNorm(nn.Module):
    def __init__(self, num_features, num_dims):
        super().__init__()
        if num_dims == 2:
            shape = (1, num_features)
        else:
            shape = (1, num_features, 1, 1)
        self.gamma = nn.Parameter(torch.ones(shape))
        self.beta = nn.Parameter(torch.zeros(shape))
        self.moving_mean = torch.zeros(shape)
        self.moving_var = torch.ones(shape)

    def forward(self, X):
        if self.moving_mean.device != X.device:
            self.moving_mean = self.moving_mean.to(X.device)
            self.moving_var = self.moving_var.to(X.device)
        Y, self.moving_mean, self.moving_var = batch_norm(
            X, self.gamma, self.beta, self.moving_mean, self.moving_var,
            eps=1e-5, momentum=0.9)
        return Y

net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),
    nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),
    nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),
    nn.Linear(84, 10))

net[1].gamma.reshape((-1,)), net[1].beta.reshape((-1,))

net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),
    nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(),
    nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(),
    nn.Linear(84, 10))
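The scratch batch_norm above uses torch.is_grad_enabled() as its training test, so prediction mode is entered via torch.no_grad(); a quick hypothetical check (not from the excerpt):

bn = BatchNorm(6, num_dims=4)   # the scratch layer defined above
X = torch.rand(2, 6, 24, 24)
_ = bn(X)              # gradients enabled: normalizes with batch statistics
with torch.no_grad():
    _ = bn(X)          # inference branch: uses moving_mean / moving_var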
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn as nn

def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum,
               is_training=True):
    if not is_training:
        X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5
    else:
        assert len(X.shape) in (2, 4)
        if len(X.shape) == 2:
            mean = paddle.mean(X)
            var = paddle.mean(((X - mean) ** 2))
        else:
            mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)
            var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)
        X_hat = (X - mean) / (var + eps) ** 0.5
        moving_mean = momentum * moving_mean + (1.0 - momentum) * mean
        moving_var = momentum * moving_var + (1.0 - momentum) * var
    Y = gamma * X_hat + beta
    return Y, moving_mean, moving_var

class BatchNorm(nn.Layer):
    def __init__(self, num_features, num_dims=4):
        super(BatchNorm, self).__init__()
        if num_dims == 2:
            shape = (1, num_features)
        else:
            shape = (1, num_features, 1, 1)
        self.gamma = self.create_parameter(
            attr=None, shape=shape, dtype='float32', is_bias=False,
            default_initializer=nn.initializer.Assign(
                paddle.ones(shape=shape, dtype='float32')))
        self.beta = self.create_parameter(
            attr=None, shape=shape, dtype='float32', is_bias=False,
            default_initializer=nn.initializer.Assign(
                paddle.zeros(shape=shape, dtype='float32')))
        self.moving_mean = paddle.zeros(shape=shape, dtype='float32')
        # the running variance starts at one, matching the scratch
        # PyTorch version (starting at zero would distort early inference)
        self.moving_var = paddle.ones(shape=shape, dtype='float32')

    def forward(self, X):
        Y, self.moving_mean, self.moving_var = batch_norm(
            X, self.gamma, self.beta, self.moving_mean, self.moving_var,
            eps=1e-5, momentum=0.9, is_training=self.training)
        return Y

net = nn.Sequential(
    nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),
    nn.MaxPool2D(kernel_size=2, stride=2),
    nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),
    nn.MaxPool2D(kernel_size=2, stride=2), nn.Flatten(),
    nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),
    nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),
    nn.Linear(84, 10))

param = net.parameters()
print('gamma:', param[2].numpy().reshape(-1))
print('beta:', param[3].numpy().reshape(-1))

net = nn.Sequential(
    nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1),
    nn.Sigmoid(), nn.MaxPool2D(kernel_size=2, stride=2),
    nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1),
    nn.Sigmoid(), nn.MaxPool2D(kernel_size=2, stride=2), nn.Flatten(),
    nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(),
    nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(),
    nn.Linear(84, 10))
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l

class Residual(nn.Module):
    def __init__(self, input_channels, num_channels,
                 use_1x1conv=False, strides=1):
        super().__init__()
        self.conv1 = nn.Conv2d(input_channels, num_channels,
                               kernel_size=3, padding=1, stride=strides)
        self.conv2 = nn.Conv2d(num_channels, num_channels,
                               kernel_size=3, padding=1)
        if use_1x1conv:
            self.conv3 = nn.Conv2d(input_channels, num_channels,
                                   kernel_size=1, stride=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.bn2 = nn.BatchNorm2d(num_channels)

    def forward(self, X):
        Y = F.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        Y += X
        return F.relu(Y)

blk = Residual(3, 3)
X = torch.rand(4, 3, 6, 6)
Y = blk(X)
Y.shape
blk = Residual(3, 6, use_1x1conv=True, strides=2)
blk(X).shape

b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
                   nn.BatchNorm2d(64), nn.ReLU(),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

net = nn.Sequential(b1, b2, b3, b4, b5, nn.AdaptiveAvgPool2d((1, 1)),
                    nn.Flatten(), nn.Linear(512, 10))
X = torch.rand(size=(1, 1, 224, 224))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn as nn
from paddle.nn import functional as F

class Residual(nn.Layer):
    def __init__(self, input_channels, num_channels,
                 use_1x1conv=False, strides=1):
        super(Residual, self).__init__()
        self.conv1 = nn.Conv2D(input_channels, num_channels,
                               kernel_size=3, padding=1, stride=strides)
        self.conv2 = nn.Conv2D(num_channels, num_channels,
                               kernel_size=3, padding=1)
        if use_1x1conv:
            self.conv3 = nn.Conv2D(input_channels, num_channels,
                                   kernel_size=1, stride=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm2D(num_channels)
        self.bn2 = nn.BatchNorm2D(num_channels)
        self.relu = nn.ReLU()

    def forward(self, X):
        Y = F.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        Y += X
        return F.relu(Y)

blk = Residual(3, 3)
X = paddle.rand([4, 3, 6, 6])
Y = blk(X)
Y.shape
blk = Residual(3, 6, use_1x1conv=True, strides=2)
blk(X).shape

b1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),
                   nn.BatchNorm2D(64), nn.ReLU(),
                   nn.MaxPool2D(kernel_size=3, stride=2, padding=1))

net = nn.Sequential(b1, b2, b3, b4, b5, nn.AdaptiveAvgPool2D((1, 1)),
                    nn.Flatten(), nn.Linear(512, 10))
X = paddle.rand(shape=(1, 1, 224, 224))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: ', X.shape)
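Both ResNet snippets above reference stage blocks b2–b5 that the excerpt leaves out. A sketch consistent with the book's ResNet-18, shown with the PyTorch names (the Paddle version only swaps the module spellings):

def resnet_block(input_channels, num_channels, num_residuals,
                 first_block=False):
    blk = []
    for i in range(num_residuals):
        if i == 0 and not first_block:
            # halve the resolution and double the channels at stage entry
            blk.append(Residual(input_channels, num_channels,
                                use_1x1conv=True, strides=2))
        else:
            blk.append(Residual(num_channels, num_channels))
    return blk

b2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))
b3 = nn.Sequential(*resnet_block(64, 128, 2))
b4 = nn.Sequential(*resnet_block(128, 256, 2))
b5 = nn.Sequential(*resnet_block(256, 512, 2))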
import torch
from torch import nn
from d2l import torch as d2l

def conv_block(input_channels, num_channels):
    return nn.Sequential(
        nn.BatchNorm2d(input_channels), nn.ReLU(),
        nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1))

class DenseBlock(nn.Module):
    def __init__(self, num_convs, input_channels, num_channels):
        super(DenseBlock, self).__init__()
        layer = []
        for i in range(num_convs):
            layer.append(conv_block(num_channels * i + input_channels,
                                    num_channels))
        self.net = nn.Sequential(*layer)

    def forward(self, X):
        for blk in self.net:
            Y = blk(X)
            X = torch.cat((X, Y), dim=1)
        return X

blk = DenseBlock(2, 3, 10)
X = torch.randn(4, 3, 8, 8)
Y = blk(X)
Y.shape

def transition_block(input_channels, num_channels):
    return nn.Sequential(
        nn.BatchNorm2d(input_channels), nn.ReLU(),
        nn.Conv2d(input_channels, num_channels, kernel_size=1),
        nn.AvgPool2d(kernel_size=2, stride=2))

b1 = nn.Sequential(
    nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
    nn.BatchNorm2d(64), nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

net = nn.Sequential(
    b1, *blks,
    nn.BatchNorm2d(num_channels), nn.ReLU(),
    nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten(),
    nn.Linear(num_channels, 10))
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn as nn

def conv_block(input_channels, num_channels):
    return nn.Sequential(
        nn.BatchNorm2D(input_channels), nn.ReLU(),
        nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1))

class DenseBlock(nn.Layer):
    def __init__(self, num_convs, input_channels, num_channels):
        super(DenseBlock, self).__init__()
        layer = []
        for i in range(num_convs):
            layer.append(conv_block(num_channels * i + input_channels,
                                    num_channels))
        self.net = nn.Sequential(*layer)

    def forward(self, X):
        for blk in self.net:
            Y = blk(X)
            X = paddle.concat(x=[X, Y], axis=1)
        return X

blk = DenseBlock(2, 3, 10)
X = paddle.randn([4, 3, 8, 8])
Y = blk(X)
Y.shape

def transition_block(input_channels, num_channels):
    return nn.Sequential(
        nn.BatchNorm2D(input_channels), nn.ReLU(),
        nn.Conv2D(input_channels, num_channels, kernel_size=1),
        nn.AvgPool2D(kernel_size=2, stride=2))

b1 = nn.Sequential(
    nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),
    nn.BatchNorm2D(64), nn.ReLU(),
    nn.MaxPool2D(kernel_size=3, stride=2, padding=1))

net = nn.Sequential(
    b1, *blks,
    nn.BatchNorm2D(num_channels), nn.ReLU(),
    nn.AdaptiveMaxPool2D((1, 1)), nn.Flatten(),
    nn.Linear(num_channels, 10))
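Likewise, blks and the final num_channels used by the DenseNet net are built earlier in the book; a sketch of that construction (assumed, matching the book's four-block DenseNet with growth rate 32):

num_channels, growth_rate = 64, 32           # channels after b1; growth per conv
num_convs_in_dense_blocks = [4, 4, 4, 4]
blks = []
for i, num_convs in enumerate(num_convs_in_dense_blocks):
    blks.append(DenseBlock(num_convs, num_channels, growth_rate))
    num_channels += num_convs * growth_rate  # each dense block adds channels
    if i != len(num_convs_in_dense_blocks) - 1:
        # halve the channel count between dense blocks
        blks.append(transition_block(num_channels, num_channels // 2))
        num_channels = num_channels // 2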
%matplotlib inline
import torch
from torch import nn
from d2l import torch as d2l

T = 1000
time = torch.arange(1, T + 1, dtype=torch.float32)
x = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,))
d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))

tau = 4
features = torch.zeros((T - tau, tau))
for i in range(tau):
    features[:, i] = x[i: T - tau + i]
labels = x[tau:].reshape((-1, 1))

batch_size, n_train = 16, 600
train_iter = d2l.load_array((features[:n_train], labels[:n_train]),
                            batch_size, is_train=True)

def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)

def get_net():
    net = nn.Sequential(nn.Linear(4, 10), nn.ReLU(), nn.Linear(10, 1))
    net.apply(init_weights)
    return net

loss = nn.MSELoss(reduction='none')

def train(net, train_iter, loss, epochs, lr):
    trainer = torch.optim.Adam(net.parameters(), lr)
    for epoch in range(epochs):
        for X, y in train_iter:
            trainer.zero_grad()
            l = loss(net(X), y)
            l.sum().backward()
            trainer.step()

net = get_net()
train(net, train_iter, loss, 5, 0.01)

onestep_preds = net(features)  # restored from the book's context
multistep_preds = torch.zeros(T)
multistep_preds[: n_train + tau] = x[: n_train + tau]
for i in range(n_train + tau, T):
    multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))
d2l.plot([time, time[tau:], time[n_train + tau:]],
         [x.detach().numpy(), onestep_preds.detach().numpy(),
          multistep_preds[n_train + tau:].detach().numpy()],
         'time', 'x',
         legend=['data', '1-step preds', 'multistep preds'],
         xlim=[1, 1000], figsize=(6, 3))

max_steps = 64
features = torch.zeros((T - tau - max_steps + 1, tau + max_steps))
for i in range(tau):
    features[:, i] = x[i: i + T - tau - max_steps + 1]
for i in range(tau, tau + max_steps):
    features[:, i] = net(features[:, i - tau:i]).reshape(-1)
steps = (1, 4, 16, 64)
d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],
         [features[:, tau + i - 1].detach().numpy() for i in steps],
         'time', 'x',
         legend=[f'{i}-step preds' for i in steps],
         xlim=[5, 1000], figsize=(6, 3))
%matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn

T = 1000
time = paddle.arange(1, T + 1, dtype=paddle.float32)
x = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,))
d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))

tau = 4
features = paddle.zeros((T - tau, tau))
for i in range(tau):
    features[:, i] = x[i: T - tau + i]
labels = x[tau:].reshape((-1, 1))

batch_size, n_train = 16, 600
train_iter = d2l.load_array((features[:n_train], labels[:n_train]),
                            batch_size, is_train=True)

def init_weights(m):
    if type(m) == nn.Linear:
        # apply Xavier initialization to the weight in place
        paddle.nn.initializer.XavierUniform()(m.weight)

def get_net():
    net = nn.Sequential(nn.Linear(4, 10), nn.ReLU(), nn.Linear(10, 1))
    net.apply(init_weights)
    return net

loss = nn.MSELoss(reduction='none')

def train(net, train_iter, loss, epochs, lr):
    trainer = paddle.optimizer.Adam(learning_rate=lr,
                                    parameters=net.parameters())
    for epoch in range(epochs):
        for X, y in train_iter:
            trainer.clear_grad()
            l = loss(net(X), y)
            l.sum().backward()
            trainer.step()

net = get_net()
train(net, train_iter, loss, 5, 0.01)

onestep_preds = net(features)  # restored from the book's context
multistep_preds = paddle.zeros([T])
multistep_preds[: n_train + tau] = x[: n_train + tau]
for i in range(n_train + tau, T):
    multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))
d2l.plot([time, time[tau:], time[n_train + tau:]],
         [x.detach().numpy(), onestep_preds.detach().numpy(),
          multistep_preds[n_train + tau:].detach().numpy()],
         'time', 'x',
         legend=['data', '1-step preds', 'multistep preds'],
         xlim=[1, 1000], figsize=(6, 3))

max_steps = 64
features = paddle.zeros((T - tau - max_steps + 1, tau + max_steps))
for i in range(tau):
    features[:, i] = x[i: i + T - tau - max_steps + 1]
for i in range(tau, tau + max_steps):
    features[:, i] = net(features[:, i - tau:i]).reshape([-1])
steps = (1, 4, 16, 64)
d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],
         [features[:, tau + i - 1].detach().numpy() for i in steps],
         'time', 'x',
         legend=[f'{i}-step preds' for i in steps],
         xlim=[5, 1000], figsize=(6, 3))
import collections
import re
from d2l import torch as d2l
import collections
import re
from d2l import paddle as d2l
import random
import torch
from d2l import torch as d2l

tokens = d2l.tokenize(d2l.read_time_machine())
corpus = [token for line in tokens for token in line]
vocab = d2l.Vocab(corpus)
vocab.token_freqs[:10]

def seq_data_iter_random(corpus, batch_size, num_steps):
    corpus = corpus[random.randint(0, num_steps - 1):]
    num_subseqs = (len(corpus) - 1) // num_steps
    initial_indices = list(range(0, num_subseqs * num_steps, num_steps))
    random.shuffle(initial_indices)

    def data(pos):
        return corpus[pos: pos + num_steps]

    num_batches = num_subseqs // batch_size
    for i in range(0, batch_size * num_batches, batch_size):
        initial_indices_per_batch = initial_indices[i: i + batch_size]
        X = [data(j) for j in initial_indices_per_batch]
        Y = [data(j + 1) for j in initial_indices_per_batch]
        yield torch.tensor(X), torch.tensor(Y)

def seq_data_iter_sequential(corpus, batch_size, num_steps):
    offset = random.randint(0, num_steps)
    num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size
    Xs = torch.tensor(corpus[offset: offset + num_tokens])
    Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens])
    Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)
    num_batches = Xs.shape[1] // num_steps
    for i in range(0, num_steps * num_batches, num_steps):
        X = Xs[:, i: i + num_steps]
        Y = Ys[:, i: i + num_steps]
        yield X, Y
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import random
import paddle

tokens = d2l.tokenize(d2l.read_time_machine())
corpus = [token for line in tokens for token in line]
vocab = d2l.Vocab(corpus)
vocab.token_freqs[:10]

def seq_data_iter_random(corpus, batch_size, num_steps):
    corpus = corpus[random.randint(0, num_steps - 1):]
    num_subseqs = (len(corpus) - 1) // num_steps
    initial_indices = list(range(0, num_subseqs * num_steps, num_steps))
    random.shuffle(initial_indices)

    def data(pos):
        return corpus[pos: pos + num_steps]

    num_batches = num_subseqs // batch_size
    for i in range(0, batch_size * num_batches, batch_size):
        initial_indices_per_batch = initial_indices[i: i + batch_size]
        X = [data(j) for j in initial_indices_per_batch]
        Y = [data(j + 1) for j in initial_indices_per_batch]
        yield paddle.to_tensor(X), paddle.to_tensor(Y)

def seq_data_iter_sequential(corpus, batch_size, num_steps):
    offset = random.randint(0, num_steps)
    num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size
    Xs = paddle.to_tensor(corpus[offset: offset + num_tokens])
    Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens])
    Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))
    num_batches = Xs.shape[1] // num_steps
    for i in range(0, num_steps * num_batches, num_steps):
        X = Xs[:, i: i + num_steps]
        Y = Ys[:, i: i + num_steps]
        yield X, Y
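A small demonstration of the two sampling schemes (mirroring the book's demo; the toy sequence here is an assumption of this sketch):

my_seq = list(range(35))
# Random sampling: adjacent minibatches need not be adjacent in the corpus.
for X, Y in seq_data_iter_random(my_seq, batch_size=2, num_steps=5):
    print('X:', X, '\nY:', Y)
# Sequential partitioning: minibatches walk through the corpus in order.
for X, Y in seq_data_iter_sequential(my_seq, batch_size=2, num_steps=5):
    print('X:', X, '\nY:', Y)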
import torch
from d2l import torch as d2l

X, W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4))
H, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4))
torch.matmul(X, W_xh) + torch.matmul(H, W_hh)
# Equivalent: concatenating inputs along columns and weights along rows
# computes the same sum in a single matrix multiplication.
torch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle X, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4)) H, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4)) paddle.matmul(X, W_xh) + paddle.matmul(H, W_hh) paddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))
%matplotlib inline
import math
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l

batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

F.one_hot(torch.tensor([0, 2]), len(vocab))
X = torch.arange(10).reshape((2, 5))
F.one_hot(X.T, 28).shape

def get_params(vocab_size, num_hiddens, device):
    num_inputs = num_outputs = vocab_size

    def normal(shape):
        return torch.randn(size=shape, device=device) * 0.01

    W_xh = normal((num_inputs, num_hiddens))
    W_hh = normal((num_hiddens, num_hiddens))
    b_h = torch.zeros(num_hiddens, device=device)
    W_hq = normal((num_hiddens, num_outputs))
    b_q = torch.zeros(num_outputs, device=device)
    params = [W_xh, W_hh, b_h, W_hq, b_q]
    for param in params:
        param.requires_grad_(True)
    return params

def init_rnn_state(batch_size, num_hiddens, device):
    return (torch.zeros((batch_size, num_hiddens), device=device), )

def rnn(inputs, state, params):
    W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state
    outputs = []
    for X in inputs:
        H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h)
        Y = torch.mm(H, W_hq) + b_q
        outputs.append(Y)
    return torch.cat(outputs, dim=0), (H,)

class RNNModelScratch:
    def __init__(self, vocab_size, num_hiddens, device, get_params,
                 init_state, forward_fn):
        self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
        self.params = get_params(vocab_size, num_hiddens, device)
        self.init_state, self.forward_fn = init_state, forward_fn

    def __call__(self, X, state):
        X = F.one_hot(X.T, self.vocab_size).type(torch.float32)
        return self.forward_fn(X, state, self.params)

    def begin_state(self, batch_size, device):
        return self.init_state(batch_size, self.num_hiddens, device)

num_hiddens = 512
net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params,
                      init_rnn_state, rnn)
state = net.begin_state(X.shape[0], d2l.try_gpu())
Y, new_state = net(X.to(d2l.try_gpu()), state)
Y.shape, len(new_state), new_state[0].shape

def predict_ch8(prefix, num_preds, net, vocab, device):
    state = net.begin_state(batch_size=1, device=device)
    outputs = [vocab[prefix[0]]]
    get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1))
    for y in prefix[1:]:
        _, state = net(get_input(), state)
        outputs.append(vocab[y])
    for _ in range(num_preds):
        y, state = net(get_input(), state)
        outputs.append(int(y.argmax(dim=1).reshape(1)))
    return ''.join([vocab.idx_to_token[i] for i in outputs])

def grad_clipping(net, theta):
    if isinstance(net, nn.Module):
        params = [p for p in net.parameters() if p.requires_grad]
    else:
        params = net.params
    norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))
    if norm > theta:
        for param in params:
            param.grad[:] *= theta / norm

def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):
    state, timer = None, d2l.Timer()
    metric = d2l.Accumulator(2)
    for X, Y in train_iter:
        if state is None or use_random_iter:
            state = net.begin_state(batch_size=X.shape[0], device=device)
        else:
            if isinstance(net, nn.Module) and not isinstance(state, tuple):
                state.detach_()
            else:
                for s in state:
                    s.detach_()
        y = Y.T.reshape(-1)
        X, y = X.to(device), y.to(device)
        y_hat, state = net(X, state)
        l = loss(y_hat, y.long()).mean()
        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.backward()
            grad_clipping(net, 1)
            updater.step()
        else:
            l.backward()
            grad_clipping(net, 1)
            updater(batch_size=1)
        metric.add(l * y.numel(), y.numel())
    return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()

def train_ch8(net, train_iter, vocab, lr, num_epochs, device,
              use_random_iter=False):
    loss = nn.CrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',
                            legend=['train'], xlim=[10, num_epochs])
    if isinstance(net, nn.Module):
        updater = torch.optim.SGD(net.parameters(), lr)
    else:
        updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
    predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)
    for epoch in range(num_epochs):
        ppl, speed = train_epoch_ch8(net, train_iter, loss, updater,
                                     device, use_random_iter)
        if (epoch + 1) % 10 == 0:
            animator.add(epoch + 1, [ppl])

num_epochs, lr = 500, 1
net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params,
                      init_rnn_state, rnn)
train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(),
          use_random_iter=True)
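The grad_clipping helper above rescales all gradients so that their global L2 norm never exceeds theta; this stripped-down sketch shows the effect on a single parameter:

import torch

p = torch.ones(4, requires_grad=True)
(p * p).sum().backward()                   # p.grad = 2*p, global norm = 4
theta = 1.0
norm = torch.sqrt(torch.sum(p.grad ** 2))
if norm > theta:
    p.grad[:] *= theta / norm              # rescale in place
print(torch.sqrt(torch.sum(p.grad ** 2)))  # tensor(1.), clipped to theta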
%matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import math
import paddle
from paddle import nn
from paddle.nn import functional as F

batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

F.one_hot(paddle.to_tensor([0, 2]), len(vocab))
X = paddle.arange(10).reshape((2, 5))
F.one_hot(X.T, 28).shape

def get_params(vocab_size, num_hiddens):
    num_inputs = num_outputs = vocab_size

    def normal(shape):
        return paddle.randn(shape=shape) * 0.01

    W_xh = normal([num_inputs, num_hiddens])
    W_hh = normal([num_hiddens, num_hiddens])
    b_h = paddle.zeros(shape=[num_hiddens])
    W_hq = normal([num_hiddens, num_outputs])
    b_q = paddle.zeros(shape=[num_outputs])
    params = [W_xh, W_hh, b_h, W_hq, b_q]
    for param in params:
        param.stop_gradient = False
    return params

def init_rnn_state(batch_size, num_hiddens):
    return (paddle.zeros(shape=[batch_size, num_hiddens]), )

def rnn(inputs, state, params):
    W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state
    outputs = []
    for X in inputs:
        H = paddle.tanh(paddle.mm(X, W_xh) + paddle.mm(H, W_hh) + b_h)
        Y = paddle.mm(H, W_hq) + b_q
        outputs.append(Y)
    return paddle.concat(x=outputs, axis=0), (H,)

class RNNModelScratch:
    def __init__(self, vocab_size, num_hiddens, get_params,
                 init_state, forward_fn):
        self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
        self.params = get_params(vocab_size, num_hiddens)
        self.init_state, self.forward_fn = init_state, forward_fn

    def __call__(self, X, state):
        X = F.one_hot(X.T, self.vocab_size)
        return self.forward_fn(X, state, self.params)

    def begin_state(self, batch_size):
        return self.init_state(batch_size, self.num_hiddens)

num_hiddens = 512
net = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)
state = net.begin_state(X.shape[0])
Y, new_state = net(X, state)
Y.shape, len(new_state), new_state[0].shape

def predict_ch8(prefix, num_preds, net, vocab, device):
    state = net.begin_state(batch_size=1)
    outputs = [vocab[prefix[0]]]
    get_input = lambda: paddle.to_tensor(outputs[-1], place=device).reshape((1, 1))
    for y in prefix[1:]:
        _, state = net(get_input(), state)
        outputs.append(vocab[y])
    for _ in range(num_preds):
        y, state = net(get_input(), state)
        outputs.append(int(paddle.reshape(paddle.argmax(y, axis=1), shape=[1])))
    return ''.join([vocab.idx_to_token[i] for i in outputs])

def grad_clipping(net, theta):
    if isinstance(net, nn.Layer):
        params = [p for p in net.parameters() if not p.stop_gradient]
    else:
        params = net.params
    norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params))
    if norm > theta:
        with paddle.no_grad():
            for param in params:
                param.grad.set_value(param.grad * theta / norm)

def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):
    state, timer = None, d2l.Timer()
    metric = d2l.Accumulator(2)
    for X, Y in train_iter:
        if state is None or use_random_iter:
            state = net.begin_state(batch_size=X.shape[0])
        else:
            if isinstance(net, nn.Layer) and not isinstance(state, tuple):
                state.stop_gradient = True
            else:
                for s in state:
                    s.stop_gradient = True
        y = paddle.reshape(Y.T, shape=[-1])
        X = paddle.to_tensor(X, place=device)
        y = paddle.to_tensor(y, place=device)
        y_hat, state = net(X, state)
        l = loss(y_hat, y).mean()
        if isinstance(updater, paddle.optimizer.Optimizer):
            updater.clear_grad()
            l.backward()
            grad_clipping(net, 1)
            updater.step()
        else:
            l.backward()
            grad_clipping(net, 1)
            updater(batch_size=1)
        metric.add(l * y.numel(), y.numel())
    return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()

def train_ch8(net, train_iter, vocab, lr, num_epochs, device,
              use_random_iter=False):
    loss = nn.CrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',
                            legend=['train'], xlim=[10, num_epochs])
    if isinstance(net, nn.Layer):
        updater = paddle.optimizer.SGD(learning_rate=lr,
                                       parameters=net.parameters())
    else:
        updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
    predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)
    for epoch in range(num_epochs):
        ppl, speed = train_epoch_ch8(net, train_iter, loss, updater,
                                     device, use_random_iter)
        if (epoch + 1) % 10 == 0:
            animator.add(epoch + 1, [ppl])

num_epochs, lr = 500, 1.0
net = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)
train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(),
          use_random_iter=True)
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l

batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

num_hiddens = 256
rnn_layer = nn.RNN(len(vocab), num_hiddens)
state = torch.zeros((1, batch_size, num_hiddens))
state.shape
X = torch.rand(size=(num_steps, batch_size, len(vocab)))
Y, state_new = rnn_layer(X, state)
Y.shape, state_new.shape

class RNNModel(nn.Module):
    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.num_hiddens = self.rnn.hidden_size
        if not self.rnn.bidirectional:
            self.num_directions = 1
            self.linear = nn.Linear(self.num_hiddens, self.vocab_size)
        else:
            self.num_directions = 2
            self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)

    def forward(self, inputs, state):
        X = F.one_hot(inputs.T.long(), self.vocab_size)
        X = X.to(torch.float32)
        Y, state = self.rnn(X, state)
        output = self.linear(Y.reshape((-1, Y.shape[-1])))
        return output, state

    def begin_state(self, device, batch_size=1):
        if not isinstance(self.rnn, nn.LSTM):
            return torch.zeros((self.num_directions * self.rnn.num_layers,
                                batch_size, self.num_hiddens), device=device)
        else:
            return (torch.zeros((self.num_directions * self.rnn.num_layers,
                                 batch_size, self.num_hiddens), device=device),
                    torch.zeros((self.num_directions * self.rnn.num_layers,
                                 batch_size, self.num_hiddens), device=device))

device = d2l.try_gpu()
net = RNNModel(rnn_layer, vocab_size=len(vocab))
net = net.to(device)
d2l.predict_ch8('time traveller', 10, net, vocab, device)
num_epochs, lr = 500, 1
d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)
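By default, PyTorch's nn.RNN consumes inputs shaped (num_steps, batch_size, input_size) and returns per-step outputs alongside the final hidden state; a tiny standalone check of those shapes:

import torch
from torch import nn

rnn = nn.RNN(input_size=8, hidden_size=16)
X = torch.randn(5, 3, 8)        # 5 time steps, batch of 3, 8 features
Y, state = rnn(X)
print(Y.shape, state.shape)     # torch.Size([5, 3, 16]) torch.Size([1, 3, 16])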
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn
from paddle.nn import functional as F

batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

num_hiddens = 256
rnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True)
state = paddle.zeros(shape=[1, batch_size, num_hiddens])
state.shape
X = paddle.rand(shape=[num_steps, batch_size, len(vocab)])
Y, state_new = rnn_layer(X, state)
Y.shape, state_new.shape

class RNNModel(nn.Layer):
    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.num_hiddens = self.rnn.hidden_size
        if self.rnn.num_directions == 1:
            self.num_directions = 1
            self.linear = nn.Linear(self.num_hiddens, self.vocab_size)
        else:
            self.num_directions = 2
            self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)

    def forward(self, inputs, state):
        X = F.one_hot(inputs.T, self.vocab_size)
        Y, state = self.rnn(X, state)
        output = self.linear(Y.reshape((-1, Y.shape[-1])))
        return output, state

    def begin_state(self, batch_size=1):
        if not isinstance(self.rnn, nn.LSTM):
            return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers,
                                       batch_size, self.num_hiddens])
        else:
            return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers,
                                        batch_size, self.num_hiddens]),
                    paddle.zeros(shape=[self.num_directions * self.rnn.num_layers,
                                        batch_size, self.num_hiddens]))

device = d2l.try_gpu()
net = RNNModel(rnn_layer, vocab_size=len(vocab))
d2l.predict_ch8('time traveller', 10, net, vocab, device)
num_epochs, lr = 500, 1.0
d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)
import torch
from torch import nn
from d2l import torch as d2l

batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

def get_params(vocab_size, num_hiddens, device):
    num_inputs = num_outputs = vocab_size

    def normal(shape):
        return torch.randn(size=shape, device=device) * 0.01

    def three():
        return (normal((num_inputs, num_hiddens)),
                normal((num_hiddens, num_hiddens)),
                torch.zeros(num_hiddens, device=device))

    W_xz, W_hz, b_z = three()
    W_xr, W_hr, b_r = three()
    W_xh, W_hh, b_h = three()
    W_hq = normal((num_hiddens, num_outputs))
    b_q = torch.zeros(num_outputs, device=device)
    params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]
    for param in params:
        param.requires_grad_(True)
    return params

def init_gru_state(batch_size, num_hiddens, device):
    return (torch.zeros((batch_size, num_hiddens), device=device), )

def gru(inputs, state, params):
    W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state
    outputs = []
    for X in inputs:
        Z = torch.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)
        R = torch.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)
        H_tilda = torch.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)
        H = Z * H + (1 - Z) * H_tilda
        Y = H @ W_hq + b_q
        outputs.append(Y)
    return torch.cat(outputs, dim=0), (H,)

vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()
num_epochs, lr = 500, 1
model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params,
                            init_gru_state, gru)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)

num_inputs = vocab_size
gru_layer = nn.GRU(num_inputs, num_hiddens)
model = d2l.RNNModel(gru_layer, len(vocab))
model = model.to(device)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn.functional as F
from paddle import nn

batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

def get_params(vocab_size, num_hiddens):
    num_inputs = num_outputs = vocab_size

    def normal(shape):
        return paddle.randn(shape=shape) * 0.01

    def three():
        return (normal((num_inputs, num_hiddens)),
                normal((num_hiddens, num_hiddens)),
                paddle.zeros([num_hiddens]))

    W_xz, W_hz, b_z = three()
    W_xr, W_hr, b_r = three()
    W_xh, W_hh, b_h = three()
    W_hq = normal((num_hiddens, num_outputs))
    b_q = paddle.zeros([num_outputs])
    params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]
    for param in params:
        param.stop_gradient = False
    return params

def init_gru_state(batch_size, num_hiddens):
    return (paddle.zeros([batch_size, num_hiddens]), )

def gru(inputs, state, params):
    W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params
    H, *_ = state
    outputs = []
    for X in inputs:
        Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)
        R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)
        H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)
        H = Z * H + (1 - Z) * H_tilda
        Y = H @ W_hq + b_q
        outputs.append(Y)
    return paddle.concat(outputs, axis=0), (H, *_)

vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()
num_epochs, lr = 500, 1.0
model = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params,
                            init_gru_state, gru)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)

num_inputs = vocab_size
gru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True)
model = d2l.RNNModel(gru_layer, len(vocab))
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
import torch
from torch import nn
from d2l import torch as d2l

batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

def get_lstm_params(vocab_size, num_hiddens, device):
    num_inputs = num_outputs = vocab_size

    def normal(shape):
        return torch.randn(size=shape, device=device) * 0.01

    def three():
        return (normal((num_inputs, num_hiddens)),
                normal((num_hiddens, num_hiddens)),
                torch.zeros(num_hiddens, device=device))

    W_xi, W_hi, b_i = three()
    W_xf, W_hf, b_f = three()
    W_xo, W_ho, b_o = three()
    W_xc, W_hc, b_c = three()
    W_hq = normal((num_hiddens, num_outputs))
    b_q = torch.zeros(num_outputs, device=device)
    params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o,
              W_xc, W_hc, b_c, W_hq, b_q]
    for param in params:
        param.requires_grad_(True)
    return params

def init_lstm_state(batch_size, num_hiddens, device):
    return (torch.zeros((batch_size, num_hiddens), device=device),
            torch.zeros((batch_size, num_hiddens), device=device))

def lstm(inputs, state, params):
    [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o,
     W_xc, W_hc, b_c, W_hq, b_q] = params
    (H, C) = state
    outputs = []
    for X in inputs:
        I = torch.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)
        F = torch.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)
        O = torch.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)
        C_tilda = torch.tanh((X @ W_xc) + (H @ W_hc) + b_c)
        C = F * C + I * C_tilda
        H = O * torch.tanh(C)
        Y = (H @ W_hq) + b_q
        outputs.append(Y)
    return torch.cat(outputs, dim=0), (H, C)

vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()
num_epochs, lr = 500, 1
model = d2l.RNNModelScratch(len(vocab), num_hiddens, device,
                            get_lstm_params, init_lstm_state, lstm)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)

num_inputs = vocab_size
lstm_layer = nn.LSTM(num_inputs, num_hiddens)
model = d2l.RNNModel(lstm_layer, len(vocab))
model = model.to(device)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn.functional as Function
from paddle import nn

batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

def get_lstm_params(vocab_size, num_hiddens):
    num_inputs = num_outputs = vocab_size

    def normal(shape):
        return paddle.randn(shape=shape) * 0.01

    def three():
        return (normal((num_inputs, num_hiddens)),
                normal((num_hiddens, num_hiddens)),
                paddle.zeros([num_hiddens]))

    W_xi, W_hi, b_i = three()
    W_xf, W_hf, b_f = three()
    W_xo, W_ho, b_o = three()
    W_xc, W_hc, b_c = three()
    W_hq = normal((num_hiddens, num_outputs))
    b_q = paddle.zeros([num_outputs])
    params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o,
              W_xc, W_hc, b_c, W_hq, b_q]
    for param in params:
        param.stop_gradient = False
    return params

def init_lstm_state(batch_size, num_hiddens):
    return (paddle.zeros([batch_size, num_hiddens]),
            paddle.zeros([batch_size, num_hiddens]))

def lstm(inputs, state, params):
    [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o,
     W_xc, W_hc, b_c, W_hq, b_q] = params
    (H, C) = state
    outputs = []
    for X in inputs:
        I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)
        F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)
        O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)
        C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c)
        C = F * C + I * C_tilda
        H = O * paddle.tanh(C)
        Y = (H @ W_hq) + b_q
        outputs.append(Y)
    return paddle.concat(outputs, axis=0), (H, C)

vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()
num_epochs, lr = 500, 1.0
model = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params,
                            init_lstm_state, lstm)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)

num_inputs = vocab_size
lstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True)
model = d2l.RNNModel(lstm_layer, len(vocab))
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
import os
import torch
from d2l import torch as d2l

def build_array_nmt(lines, vocab, num_steps):
    lines = [vocab[l] for l in lines]
    lines = [l + [vocab['<eos>']] for l in lines]
    array = torch.tensor([truncate_pad(l, num_steps, vocab['<pad>'])
                          for l in lines])
    valid_len = (array != vocab['<pad>']).type(torch.int32).sum(1)
    return array, valid_len

train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)
for X, X_valid_len, Y, Y_valid_len in train_iter:
    print('X:', X.type(torch.int32))
    print('Valid length of X:', X_valid_len)
    print('Y:', Y.type(torch.int32))
    print('Valid length of Y:', Y_valid_len)
    break
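build_array_nmt above relies on a truncate_pad helper that is not shown in this excerpt; a sketch matching its expected behavior (clip a token list to num_steps, otherwise pad it with the padding token):

def truncate_pad(line, num_steps, padding_token):
    if len(line) > num_steps:
        return line[:num_steps]                               # truncate
    return line + [padding_token] * (num_steps - len(line))   # pad

print(truncate_pad([1, 2, 3], 5, 0))     # [1, 2, 3, 0, 0]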
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import os
import paddle

def build_array_nmt(lines, vocab, num_steps):
    lines = [vocab[l] for l in lines]
    lines = [l + [vocab['<eos>']] for l in lines]
    array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['<pad>'])
                              for l in lines])
    valid_len = (array != vocab['<pad>']).astype(paddle.int32).sum(1)
    return array, valid_len

train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)
for X, X_valid_len, Y, Y_valid_len in train_iter:
    print('X:', X.astype(paddle.int32))
    print('Valid length of X:', X_valid_len)
    print('Y:', Y.astype(paddle.int32))
    print('Valid length of Y:', Y_valid_len)
    break
import tensorflow as tf

x = tf.range(12)
tf.size(x)
X = tf.reshape(x, (3, 4))
tf.zeros((2, 3, 4))
tf.ones((2, 3, 4))
tf.random.normal(shape=[3, 4])
tf.constant([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])

x = tf.constant([1.0, 2, 4, 8])
y = tf.constant([2.0, 2, 2, 2])
x + y, x - y, x * y, x / y, x ** y
tf.exp(x)

X = tf.reshape(tf.range(12, dtype=tf.float32), (3, 4))
Y = tf.constant([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
tf.concat([X, Y], axis=0), tf.concat([X, Y], axis=1)
tf.reduce_sum(X)

a = tf.reshape(tf.range(3), (3, 1))
b = tf.reshape(tf.range(2), (1, 2))

X_var = tf.Variable(X)
X_var[1, 2].assign(9)
X_var = tf.Variable(X)
X_var[0:2, :].assign(tf.ones(X_var[0:2, :].shape, dtype=tf.float32) * 12)

Z = tf.Variable(tf.zeros_like(Y))
Z.assign(X + Y)

@tf.function
def computation(X, Y):
    Z = tf.zeros_like(Y)
    A = X + Y
    B = A + Y
    C = B + Y
    return C + Y

computation(X, Y)

A = X.numpy()
B = tf.constant(A)
a = tf.constant([3.5]).numpy()
print(a, a.item(), float(a), int(a))
import torch

x = torch.arange(12)
x.numel()
X = x.reshape(3, 4)
torch.zeros((2, 3, 4))
torch.ones((2, 3, 4))
torch.randn(3, 4)
torch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])

x = torch.tensor([1.0, 2, 4, 8])
y = torch.tensor([2, 2, 2, 2])
x + y, x - y, x * y, x / y, x ** y
torch.exp(x)

X = torch.arange(12, dtype=torch.float32).reshape((3, 4))
Y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
torch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1)
X.sum()

a = torch.arange(3).reshape((3, 1))
b = torch.arange(2).reshape((1, 2))

X[1, 2] = 9
X[0:2, :] = 12

Z = torch.zeros_like(Y)
Z[:] = X + Y
before = id(X)
X += Y
id(X) == before

A = X.numpy()
B = torch.tensor(A)
a = torch.tensor([3.5])
print(a, a.item(), float(a), int(a))
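The a and b defined above have shapes (3, 1) and (1, 2); adding them triggers broadcasting, where both are expanded to (3, 2) before the elementwise sum. A self-contained check:

import torch

a = torch.arange(3).reshape((3, 1))
b = torch.arange(2).reshape((1, 2))
print(a + b)   # tensor([[0, 1], [1, 2], [2, 3]])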
import tensorflow as tf

X, y = tf.constant(inputs.values), tf.constant(outputs.values)
import torch

X, y = torch.tensor(inputs.values), torch.tensor(outputs.values)
import tensorflow as tf

x = tf.constant(3.0)
y = tf.constant(2.0)
print(x + y, x * y, x / y, x**y)

x = tf.range(4)
A = tf.reshape(tf.range(20), (5, 4))
tf.transpose(A)
B = tf.constant([[1, 2, 3], [2, 0, 4], [3, 4, 5]])
B == tf.transpose(B)
X = tf.reshape(tf.range(24), (2, 3, 4))

A = tf.reshape(tf.range(20, dtype=tf.float32), (5, 4))
B = A
print(A, A + B)

a = 2
X = tf.reshape(tf.range(24), (2, 3, 4))
print(a + X, (a * X).shape)

x = tf.range(4, dtype=tf.float32)
print(x, tf.reduce_sum(x))

a = tf.reduce_sum(A)
A_sum_axis0 = tf.reduce_sum(A, axis=0)
A_sum_axis1 = tf.reduce_sum(A, axis=1)
tf.reduce_sum(A, axis=[0, 1])
tf.reduce_mean(A)
tf.reduce_sum(A) / tf.size(A).numpy()
tf.reduce_mean(A, axis=0)
tf.reduce_sum(A, axis=0) / A.shape[0]
sum_A = tf.reduce_sum(A, axis=1, keepdims=True)
tf.cumsum(A, axis=0)

y = tf.ones(4, dtype=tf.float32)
print(tf.tensordot(x, y, axes=1))
tf.reduce_sum(x * y)
A.shape, x.shape, tf.linalg.matvec(A, x)
B = tf.ones((4, 3), tf.float32)
tf.matmul(A, B)

u = tf.constant([3.0, -4.0])
tf.norm(u)
tf.reduce_sum(tf.abs(u))
tf.norm(tf.ones((4, 9)))
import torch

x = torch.tensor(3.0)
y = torch.tensor(2.0)
print(x + y, x * y, x / y, x**y)

x = torch.arange(4)
A = torch.arange(20).reshape(5, 4)
A.T
B = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])
B == B.T
X = torch.arange(24).reshape(2, 3, 4)

A = torch.arange(20, dtype=torch.float32).reshape(5, 4)
B = A.clone()
print(A, A + B)

a = 2
X = torch.arange(24).reshape(2, 3, 4)
print(a + X, (a * X).shape)

x = torch.arange(4, dtype=torch.float32)
print(x, x.sum())

a = A.sum()
A_sum_axis0 = A.sum(axis=0)
A_sum_axis1 = A.sum(axis=1)
A.sum(axis=[0, 1])
A.mean()
A.sum() / A.numel()
A.mean(axis=0)
A.sum(axis=0) / A.shape[0]
sum_A = A.sum(axis=1, keepdims=True)
A.cumsum(axis=0)

y = torch.ones(4, dtype=torch.float32)
print(torch.dot(x, y))
torch.sum(x * y)
A.shape, x.shape, torch.mv(A, x)
B = torch.ones(4, 3)
torch.mm(A, B)

u = torch.tensor([3.0, -4.0])
torch.norm(u)
torch.abs(u).sum()
torch.norm(torch.ones((4, 9)))
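For reference, the expected values of the norm calls above: the L2 norm of u = (3, -4) is 5, its L1 norm is 7, and the Frobenius norm of a 4x9 matrix of ones is sqrt(36) = 6.

import torch

u = torch.tensor([3.0, -4.0])
print(torch.norm(u))                   # tensor(5.)
print(torch.abs(u).sum())              # tensor(7.)
print(torch.norm(torch.ones((4, 9))))  # tensor(6.)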
%matplotlib inline
import numpy as np
from matplotlib_inline import backend_inline
from d2l import tensorflow as d2l

def f(x):
    return 3 * x ** 2 - 4 * x
%matplotlib inline
import numpy as np
from matplotlib_inline import backend_inline
from d2l import torch as d2l

def f(x):
    return 3 * x ** 2 - 4 * x
import tensorflow as tf

x = tf.range(4, dtype=tf.float32)
x = tf.Variable(x)

with tf.GradientTape() as t:
    y = 2 * tf.tensordot(x, x, axes=1)
x_grad = t.gradient(y, x)
x_grad
x_grad == 4 * x

with tf.GradientTape() as t:
    y = tf.reduce_sum(x)
t.gradient(y, x)

with tf.GradientTape() as t:
    y = x * x
t.gradient(y, x)

with tf.GradientTape(persistent=True) as t:
    y = x * x
    u = tf.stop_gradient(y)
    z = u * x
x_grad = t.gradient(z, x)
x_grad == u
t.gradient(y, x) == 2 * x

def f(a):
    b = a * 2
    while tf.norm(b) < 1000:
        b = b * 2
    if tf.reduce_sum(b) > 0:
        c = b
    else:
        c = 100 * b
    return c

a = tf.Variable(tf.random.normal(shape=()))
with tf.GradientTape() as t:
    d = f(a)
d_grad = t.gradient(d, a)
d_grad
d_grad == d / a
import torch

x = torch.arange(4.0)
x.requires_grad_(True)
x.grad

y = 2 * torch.dot(x, x)
y.backward()
x.grad
x.grad == 4 * x

x.grad.zero_()
y = x.sum()
y.backward()
x.grad

x.grad.zero_()
y = x * x
y.sum().backward()
x.grad

x.grad.zero_()
y = x * x
u = y.detach()
z = u * x
z.sum().backward()
x.grad == u

x.grad.zero_()
y.sum().backward()
x.grad == 2 * x

def f(a):
    b = a * 2
    while b.norm() < 1000:
        b = b * 2
    if b.sum() > 0:
        c = b
    else:
        c = 100 * b
    return c

a = torch.randn(size=(), requires_grad=True)
d = f(a)
d.backward()
a.grad == d / a
%matplotlib inline
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from d2l import tensorflow as d2l

fair_probs = tf.ones(6) / 6
tfp.distributions.Multinomial(1, fair_probs).sample()
tfp.distributions.Multinomial(10, fair_probs).sample()
counts = tfp.distributions.Multinomial(1000, fair_probs).sample()
%matplotlib inline
import torch
from torch.distributions import multinomial
from d2l import torch as d2l

fair_probs = torch.ones([6]) / 6
multinomial.Multinomial(1, fair_probs).sample()
multinomial.Multinomial(10, fair_probs).sample()
counts = multinomial.Multinomial(1000, fair_probs).sample()
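Dividing the 1000-draw sample above by the number of draws gives relative frequencies, each of which should hover near the true probability 1/6, roughly 0.167:

print(counts / 1000)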
counts = tfp.distributions.Multinomial(10, fair_probs).sample(500)
cum_counts = tf.cumsum(counts, axis=0)
estimates = cum_counts / tf.reduce_sum(cum_counts, axis=1, keepdims=True)

d2l.set_figsize((6, 4.5))
for i in range(6):
    d2l.plt.plot(estimates[:, i].numpy(),
                 label=("P(die=" + str(i + 1) + ")"))
d2l.plt.axhline(y=0.167, color='black', linestyle='dashed')
d2l.plt.gca().set_xlabel('Groups of experiments')
d2l.plt.gca().set_ylabel('Estimated probability')
d2l.plt.legend();

import tensorflow as tf

a = dir(tf.random)
help(tf.ones)
tf.ones(4)
counts = multinomial.Multinomial(10, fair_probs).sample((500,))
cum_counts = counts.cumsum(dim=0)
estimates = cum_counts / cum_counts.sum(dim=1, keepdims=True)

d2l.set_figsize((6, 4.5))
for i in range(6):
    d2l.plt.plot(estimates[:, i].numpy(),
                 label=("P(die=" + str(i + 1) + ")"))
d2l.plt.axhline(y=0.167, color='black', linestyle='dashed')
d2l.plt.gca().set_xlabel('Groups of experiments')
d2l.plt.gca().set_ylabel('Estimated probability')
d2l.plt.legend();

import torch

a = dir(torch.distributions)
help(torch.ones)
torch.ones(4)
%matplotlib inline
import math
import time
import numpy as np
import tensorflow as tf
from d2l import tensorflow as d2l

n = 10000
a = tf.ones(n)
b = tf.ones(n)

c = tf.Variable(tf.zeros(n))
timer = d2l.Timer()
for i in range(n):
    c[i].assign(a[i] + b[i])
%matplotlib inline
import math
import time
import numpy as np
import torch
from d2l import torch as d2l

n = 10000
a = torch.ones(n)
b = torch.ones(n)

c = torch.zeros(n)
timer = d2l.Timer()
for i in range(n):
    c[i] = a[i] + b[i]
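For contrast with the elementwise loop above, a self-contained sketch (PyTorch, plain time.time() instead of the d2l Timer) timing the fully vectorized sum, which is typically orders of magnitude faster:

import time
import torch

n = 10000
a, b = torch.ones(n), torch.ones(n)
start = time.time()
d = a + b                              # one vectorized op instead of n scalar ops
print(f'{time.time() - start:.5f} sec')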
%matplotlib inline
import random
import tensorflow as tf
from d2l import tensorflow as d2l

def synthetic_data(w, b, num_examples):
    X = tf.zeros((num_examples, w.shape[0]))
    X += tf.random.normal(shape=X.shape)
    y = tf.matmul(X, tf.reshape(w, (-1, 1))) + b
    y += tf.random.normal(shape=y.shape, stddev=0.01)
    y = tf.reshape(y, (-1, 1))
    return X, y

true_w = tf.constant([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)

d2l.set_figsize()
d2l.plt.scatter(features[:, (1)].numpy(), labels.numpy(), 1);

def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        j = tf.constant(indices[i: min(i + batch_size, num_examples)])
        yield tf.gather(features, j), tf.gather(labels, j)

w = tf.Variable(tf.random.normal(shape=(2, 1), mean=0, stddev=0.01),
                trainable=True)
b = tf.Variable(tf.zeros(1), trainable=True)

def linreg(X, w, b):
    return tf.matmul(X, w) + b

def squared_loss(y_hat, y):
    return (y_hat - tf.reshape(y, y_hat.shape)) ** 2 / 2

def sgd(params, grads, lr, batch_size):
    for param, grad in zip(params, grads):
        param.assign_sub(lr * grad / batch_size)

batch_size = 10
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss

for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        with tf.GradientTape() as g:
            l = loss(net(X, w, b), y)
        dw, db = g.gradient(l, [w, b])
        sgd([w, b], [dw, db], lr, batch_size)
    train_l = loss(net(features, w, b), labels)
%matplotlib inline
import random
import torch
from d2l import torch as d2l

def synthetic_data(w, b, num_examples):
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)
    return X, y.reshape((-1, 1))

true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)

d2l.set_figsize()
d2l.plt.scatter(features[:, (1)].detach().numpy(), labels.detach().numpy(), 1);

def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]

w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)

def linreg(X, w, b):
    return torch.matmul(X, w) + b

def squared_loss(y_hat, y):
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2

def sgd(params, lr, batch_size):
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size
            param.grad.zero_()

batch_size = 10
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss

for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)
        l.sum().backward()
        sgd([w, b], lr, batch_size)
    with torch.no_grad():
        train_l = loss(net(features, w, b), labels)
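Since the generating parameters are known, a natural follow-up check (assuming the tensors from the block above are still in scope) is how far the learned w and b ended up from true_w and true_b:

print(f'error in estimating w: {true_w - w.reshape(true_w.shape)}')
print(f'error in estimating b: {true_b - b}')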
import numpy as np
import tensorflow as tf
from d2l import tensorflow as d2l

true_w = tf.constant([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)

def load_array(data_arrays, batch_size, is_train=True):
    dataset = tf.data.Dataset.from_tensor_slices(data_arrays)
    if is_train:
        dataset = dataset.shuffle(buffer_size=1000)
    dataset = dataset.batch(batch_size)
    return dataset

batch_size = 10
data_iter = load_array((features, labels), batch_size)

net = tf.keras.Sequential()
net.add(tf.keras.layers.Dense(1))

initializer = tf.initializers.RandomNormal(stddev=0.01)
net = tf.keras.Sequential()
net.add(tf.keras.layers.Dense(1, kernel_initializer=initializer))

loss = tf.keras.losses.MeanSquaredError()
trainer = tf.keras.optimizers.SGD(learning_rate=0.03)

w = net.get_weights()[0]
b = net.get_weights()[1]
import numpy as np
import torch
from torch.utils import data
from d2l import torch as d2l

true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)

def load_array(data_arrays, batch_size, is_train=True):
    dataset = data.TensorDataset(*data_arrays)
    return data.DataLoader(dataset, batch_size, shuffle=is_train)

batch_size = 10
data_iter = load_array((features, labels), batch_size)

from torch import nn

net = nn.Sequential(nn.Linear(2, 1))
net[0].weight.data.normal_(0, 0.01)
net[0].bias.data.fill_(0)

loss = nn.MSELoss()
trainer = torch.optim.SGD(net.parameters(), lr=0.03)

w = net[0].weight.data
b = net[0].bias.data
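The block above sets up net, loss, trainer, and data_iter, but the training loop itself appears to have been dropped in extraction; a minimal sketch consistent with that setup (the weight inspection at the end of the block is most informative after these epochs run):

num_epochs = 3
for epoch in range(num_epochs):
    for X, y in data_iter:
        l = loss(net(X), y)
        trainer.zero_grad()
        l.backward()
        trainer.step()
    l = loss(net(features), labels)
    print(f'epoch {epoch + 1}, loss {l:f}')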
%matplotlib inline
import tensorflow as tf
from d2l import tensorflow as d2l

d2l.use_svg_display()

mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()
len(mnist_train[0]), len(mnist_test[0])

def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for i, (ax, img) in enumerate(zip(axes, imgs)):
        ax.imshow(img.numpy())
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[i])
    return axes

X = tf.constant(mnist_train[0][:18])
y = tf.constant(mnist_train[1][:18])
show_images(X, 2, 9, titles=get_fashion_mnist_labels(y));

batch_size = 256
train_iter = tf.data.Dataset.from_tensor_slices(mnist_train).batch(
    batch_size).shuffle(len(mnist_train[0]))

def load_data_fashion_mnist(batch_size, resize=None):
    mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()
    process = lambda X, y: (tf.expand_dims(X, axis=3) / 255,
                            tf.cast(y, dtype='int32'))
    resize_fn = lambda X, y: (
        tf.image.resize_with_pad(X, resize, resize) if resize else X, y)
    return (tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(
                batch_size).shuffle(len(mnist_train[0])).map(resize_fn),
            tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(
                batch_size).map(resize_fn))
%matplotlib inline
import torch
import torchvision
from torch.utils import data
from torchvision import transforms
from d2l import torch as d2l

d2l.use_svg_display()

trans = transforms.ToTensor()
mnist_train = torchvision.datasets.FashionMNIST(
    root="../data", train=True, transform=trans, download=True)
mnist_test = torchvision.datasets.FashionMNIST(
    root="../data", train=False, transform=trans, download=True)
len(mnist_train), len(mnist_test)

def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for i, (ax, img) in enumerate(zip(axes, imgs)):
        if torch.is_tensor(img):
            ax.imshow(img.numpy())
        else:
            ax.imshow(img)
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[i])
    return axes

X, y = next(iter(data.DataLoader(mnist_train, batch_size=18)))
show_images(X.reshape(18, 28, 28), 2, 9, titles=get_fashion_mnist_labels(y));

batch_size = 256

def get_dataloader_workers():
    return 4

train_iter = data.DataLoader(mnist_train, batch_size, shuffle=True,
                             num_workers=get_dataloader_workers())

def load_data_fashion_mnist(batch_size, resize=None):
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))
    trans = transforms.Compose(trans)
    mnist_train = torchvision.datasets.FashionMNIST(
        root="../data", train=True, transform=trans, download=True)
    mnist_test = torchvision.datasets.FashionMNIST(
        root="../data", train=False, transform=trans, download=True)
    return (data.DataLoader(mnist_train, batch_size, shuffle=True,
                            num_workers=get_dataloader_workers()),
            data.DataLoader(mnist_test, batch_size, shuffle=False,
                            num_workers=get_dataloader_workers()))
import tensorflow as tf
from IPython import display
from d2l import tensorflow as d2l

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

num_inputs = 784
num_outputs = 10
W = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs),
                                 mean=0, stddev=0.01))
b = tf.Variable(tf.zeros(num_outputs))

X = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
tf.reduce_sum(X, 0, keepdims=True), tf.reduce_sum(X, 1, keepdims=True)

def softmax(X):
    X_exp = tf.exp(X)
    partition = tf.reduce_sum(X_exp, 1, keepdims=True)
    return X_exp / partition

X = tf.random.normal((2, 5), 0, 1)
X_prob = softmax(X)
X_prob, tf.reduce_sum(X_prob, 1)

def net(X):
    return softmax(tf.matmul(tf.reshape(X, (-1, W.shape[0])), W) + b)

y_hat = tf.constant([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
y = tf.constant([0, 2])
tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1]))

def cross_entropy(y_hat, y):
    return -tf.math.log(tf.boolean_mask(
        y_hat, tf.one_hot(y, depth=y_hat.shape[-1])))

cross_entropy(y_hat, y)

def accuracy(y_hat, y):
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = tf.argmax(y_hat, axis=1)
    cmp = tf.cast(y_hat, y.dtype) == y
    return float(tf.reduce_sum(tf.cast(cmp, y.dtype)))

def evaluate_accuracy(net, data_iter):
    metric = Accumulator(2)
    for X, y in data_iter:
        metric.add(accuracy(net(X), y), d2l.size(y))
    return metric[0] / metric[1]

def train_epoch_ch3(net, train_iter, loss, updater):
    metric = Accumulator(3)
    for X, y in train_iter:
        with tf.GradientTape() as tape:
            y_hat = net(X)
            if isinstance(loss, tf.keras.losses.Loss):
                l = loss(y, y_hat)
            else:
                l = loss(y_hat, y)
        if isinstance(updater, tf.keras.optimizers.Optimizer):
            params = net.trainable_variables
            grads = tape.gradient(l, params)
            updater.apply_gradients(zip(grads, params))
        else:
            updater(X.shape[0], tape.gradient(l, updater.params))
        l_sum = l * float(tf.size(y)) if isinstance(
            loss, tf.keras.losses.Loss) else tf.reduce_sum(l)
        metric.add(l_sum, accuracy(y_hat, y), tf.size(y))
    return metric[0] / metric[2], metric[1] / metric[2]

class Updater():
    def __init__(self, params, lr):
        self.params = params
        self.lr = lr

    def __call__(self, batch_size, grads):
        d2l.sgd(self.params, grads, self.lr, batch_size)

updater = Updater([W, b], lr=0.1)

def predict_ch3(net, test_iter, n=6):
    for X, y in test_iter:
        break
    trues = d2l.get_fashion_mnist_labels(y)
    preds = d2l.get_fashion_mnist_labels(tf.argmax(net(X), axis=1))
    titles = [true + '\n' + pred for true, pred in zip(trues, preds)]
    d2l.show_images(tf.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n])

predict_ch3(net, test_iter)
import torch
from IPython import display
from d2l import torch as d2l

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

num_inputs = 784
num_outputs = 10
W = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True)
b = torch.zeros(num_outputs, requires_grad=True)

X = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
X.sum(0, keepdim=True), X.sum(1, keepdim=True)

def softmax(X):
    X_exp = torch.exp(X)
    partition = X_exp.sum(1, keepdim=True)
    return X_exp / partition

X = torch.normal(0, 1, (2, 5))
X_prob = softmax(X)
X_prob, X_prob.sum(1)

def net(X):
    return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b)

y = torch.tensor([0, 2])
y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
y_hat[[0, 1], y]

def cross_entropy(y_hat, y):
    return -torch.log(y_hat[range(len(y_hat)), y])

cross_entropy(y_hat, y)

def accuracy(y_hat, y):
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = y_hat.argmax(axis=1)
    cmp = y_hat.type(y.dtype) == y
    return float(cmp.type(y.dtype).sum())

def evaluate_accuracy(net, data_iter):
    if isinstance(net, torch.nn.Module):
        net.eval()
    metric = Accumulator(2)
    with torch.no_grad():
        for X, y in data_iter:
            metric.add(accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]

def train_epoch_ch3(net, train_iter, loss, updater):
    if isinstance(net, torch.nn.Module):
        net.train()
    metric = Accumulator(3)
    for X, y in train_iter:
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.mean().backward()
            updater.step()
        else:
            l.sum().backward()
            updater(X.shape[0])
        metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    return metric[0] / metric[2], metric[1] / metric[2]

lr = 0.1

def updater(batch_size):
    return d2l.sgd([W, b], lr, batch_size)

def predict_ch3(net, test_iter, n=6):
    for X, y in test_iter:
        break
    trues = d2l.get_fashion_mnist_labels(y)
    preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))
    titles = [true + '\n' + pred for true, pred in zip(trues, preds)]
    d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])

predict_ch3(net, test_iter)
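The block above defines the per-epoch routine and the updater, but the multi-epoch driver (train_ch3 in d2l) was evidently lost in extraction; a minimal sketch consistent with the pieces above, with simple prints standing in for d2l's Animator:

def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
    for epoch in range(num_epochs):
        train_loss, train_acc = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        print(f'epoch {epoch + 1}: loss {train_loss:.3f}, '
              f'train acc {train_acc:.3f}, test acc {test_acc:.3f}')

num_epochs = 10
train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, updater)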