= 5\n h = paddle.to_tensor(h)\n m = paddle.to_tensor(m.weight)\n m *= h\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.set_value(net[0].weight.numpy() + 1)\nval = net[0].weight.numpy()\nval[0, 0] = 42\nnet[0].weight.set_value(val)\nnet[0].weight[0]\nlayer = CenteredLayer()\nlayer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport numpy as np\nimport paddle\nfrom paddle import nn\ntrue_w, features, poly_features, labels = [paddle.to_tensor(x, dtype=\n paddle.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef train(train_features, test_features, train_labels, test_labels,\n num_epochs=400):\n loss = nn.MSELoss()\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False)\n trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2],\n labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :],\n labels[:n_train], labels[n_train:], num_epochs=1500)":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nnet = nn.Sequential(\n nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)":6,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nx = paddle.arange(4)\npaddle.save(x, 'x-file')\nx2 = paddle.load('x-file')\ny = paddle.zeros([4])\npaddle.save([x,y], 'x-file')\nx2, y2 = paddle.load('x-file')\nmydict = {'x': x, 'y': y}\npaddle.save(mydict, 'mydict')\nmydict2 = paddle.load('mydict')\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = paddle.randn(shape=[2, 20])\nY = net(X)\npaddle.save(net.state_dict(), 'mlp.pdparams')\nclone = MLP()\nclone.set_state_dict(paddle.load('mlp.pdparams'))\nclone.eval()":6,"import 
warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport os\nimport paddle\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = (array != vocab['<pad>']).astype(paddle.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(paddle.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(paddle.int32))\n print('Valid length of Y:', Y_valid_len)\n break":6,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport sys\nimport paddle\nfrom paddle.vision import transforms\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\nmnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if paddle.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ndef get_dataloader_workers():\n return 4\ntrain_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\n mnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\n return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()),\n paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers()))":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H,*_ = state\n outputs = []\n for X in 
inputs:\n Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H,*_)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)":6,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport time\nimport numpy as np\nimport paddle\nn = 10000\na = paddle.ones([n])\nb = paddle.ones([n])\nc = paddle.zeros([n])\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',\n ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])":6,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Layer):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = paddle.concat(x=[X, Y], axis=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = paddle.randn([4, 3, 8, 8])\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2D(kernel_size=2, stride=2))\nb1 = nn.Sequential(\n nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2D(num_channels), nn.ReLU(),\n nn.AdaptiveMaxPool2D((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield paddle.to_tensor(X), paddle.to_tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, 
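The vectorization-timing and normal-distribution cell above calls Timer() and normal(x, mu, sigma) without defining them in this dump. The following is a minimal sketch of what those helpers could look like, modeled on the usual d2l utilities rather than recovered from the source:

import math
import time
import numpy as np

class Timer:
    """Accumulate wall-clock timings across repeated runs."""
    def __init__(self):
        self.times = []
        self.start()
    def start(self):
        self.tik = time.time()
    def stop(self):
        # record and return the elapsed time of the last interval
        self.times.append(time.time() - self.tik)
        return self.times[-1]

def normal(x, mu, sigma):
    # Gaussian density evaluated on a NumPy array, as used by the d2l.plot call
    p = 1 / math.sqrt(2 * math.pi * sigma ** 2)
    return p * np.exp(-0.5 / sigma ** 2 * (x - mu) ** 2)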
num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = paddle.to_tensor(corpus[offset: offset + num_tokens])\n Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y":6,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = paddle.rand([2, 20])\nnet(X)\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nclass MySequential(nn.Layer):\n def __init__(self, *layers):\n super(MySequential, self).__init__()\n if len(layers) > 0 and isinstance(layers[0], tuple):\n for name, layer in layers:\n self.add_sublayer(name, layer)\n else:\n for idx, layer in enumerate(layers):\n self.add_sublayer(str(idx), layer)\n def forward(self, X):\n for layer in self._sub_layers.values():\n X = layer(X)\n return X\nnet = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nnet(X)\nclass FixedHiddenMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.rand_weight = paddle.rand([20, 20])\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nclass NestMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),\n nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)":2,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = paddle.rand([2, 4])\nnet(X)\nnet.state_dict()['2.bias']\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_sublayer(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Normal(mean=0.0, std=0.01)\n paddle.zeros(m.bias)\nnet.apply(init_normal)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef init_constant(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(value = 1)\n paddle.zeros(m.bias)\nnet.apply(init_constant)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef xavier(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.XavierUniform(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(42)\nnet[0].apply(xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n for name, param in m.named_parameters()][0])\n paddle.nn.initializer.XavierUniform(m.weight, -10, 10)\n h = paddle.abs(m.weight) >= 5\n h = paddle.to_tensor(h)\n m = paddle.to_tensor(m.weight)\n m 
*= h\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.set_value(net[0].weight.numpy() + 1)\nval = net[0].weight.numpy()\nval[0, 0] = 42\nnet[0].weight.set_value(val)\nnet[0].weight[0]\nlayer = CenteredLayer()\nlayer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))\nnet = nn.Sequential(nn.Linear(8, 128), CenteredLayer())":4,"import collections\nimport re\nfrom d2l import paddle as d2l":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Linear(256, 10))\nfor layer in net:\n if type(layer) == nn.Linear:\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.01))\n layer.weight_attr = weight_attr\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)":6,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = paddle.normal(0, 1, shape=(num_inputs, 1))\n w.stop_gradient = False\n b = paddle.zeros(shape=[1])\n b.stop_gradient = False\n return [w, b]\ndef l2_penalty(w):\n return paddle.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr))\n loss = nn.MSELoss()\n num_epochs, lr = 100, 0.003\n trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n trainer.step()\n trainer.clear_grad()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))":6,"x = paddle.arange(12)\nx.numel()\nX = paddle.reshape(x, (3, 4))\npaddle.zeros((2, 3, 4))\npaddle.ones((2, 3, 4))\npaddle.randn((3, 4),'float32')\npaddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = paddle.to_tensor([1.0, 2, 4, 8])\ny = paddle.to_tensor([2, 2, 2, 2])\nx + y, x - 
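Note that init_normal and init_constant in the parameter-initialization cell above construct paddle.nn.initializer objects without applying them to any parameter. A hedged sketch of one way to actually overwrite the weights in place, using set_value rather than the initializer API (an assumption about intent, not the notebook's own code):

import paddle
from paddle import nn

def init_normal(m):
    # draw fresh values and write them into the existing parameters
    if type(m) == nn.Linear:
        m.weight.set_value(paddle.normal(0.0, 0.01, shape=m.weight.shape))
        m.bias.set_value(paddle.zeros(m.bias.shape))

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
net.apply(init_normal)
print(net[0].weight[0], net[0].bias)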
y, x * y, x / y, x**y\npaddle.exp(x)\nX = paddle.arange(12, dtype='float32').reshape((3, 4))\nY = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\npaddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)\na = paddle.reshape(paddle.arange(3), (3, 1))\nb = paddle.reshape(paddle.arange(2), (1, 2))\nZ = paddle.zeros_like(Y)\nZ = X + Y\nA = X.numpy()\nB = paddle.to_tensor(A)\ntype(A), type(B)\na = paddle.to_tensor([3.5])\na, a.item(), float(a), int(a)":2,"import warningsfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef corr2d(X, K):\n h, w = K.shape\n Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Layer):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = paddle.ParamAttr(paddle.rand(kernel_size))\n self.bias = paddle.ParamAttr(paddle.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = paddle.ones((6, 8))\nX[:, 2:6] = 0\nK = paddle.to_tensor([[1.0, -1.0]])\nconv2d = nn.Conv2D(1, 1, kernel_size=(1, 2))\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.clear_gradients()\n l.sum().backward()\n with paddle.no_grad():\n conv2d.weight[:] -= lr * conv2d.weight.grad\nconv2d.weight.reshape((1, 2))":2,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nT = 1000\ntime = paddle.arange(1, T + 1, dtype=paddle.float32)\nx = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = paddle.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.XavierUniform(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters())\n for epoch in range(epochs):\n for i,(X, y) in enumerate (train_iter()):\n trainer.clear_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nmultistep_preds = paddle.zeros([T])\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = paddle.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape([-1])\nsteps = (1, 4, 16, 
64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))":2,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nx = paddle.to_tensor([3.0])\ny = paddle.to_tensor([2.0])\nx + y, x * y, x / y, x**y\nx = paddle.arange(4)\nA = paddle.reshape(paddle.arange(20), (5, 4))\npaddle.transpose(A, perm=[1, 0])\nB = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == paddle.transpose(B, perm=[1, 0])\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\nA = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))\nB = A.clone()\nA, A + B\na = 2\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\na + X, (a * X).shape\nx = paddle.arange(4, dtype=paddle.float32)\nprint(x, x.sum())\nA.shape, A.sum()\nA.mean(), A.sum() / A.numel()\nA.mean(axis=0), A.sum(axis=0) / A.shape[0]\nsum_A = paddle.sum(A, axis=1, keepdim=True)\ny = paddle.ones(shape=[4], dtype='float32')\nx, y, paddle.dot(x, y)\npaddle.sum(x * y)\nA.shape, x.shape, paddle.mv(A, x)\nB = paddle.ones(shape=[4, 3], dtype='float32')\npaddle.mm(A, B)\nu = paddle.to_tensor([3.0, -4.0])\npaddle.norm(u)\npaddle.abs(u).sum()\npaddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Layer):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = paddle.concat(x=[X, Y], axis=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = paddle.randn([4, 3, 8, 8])\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2D(kernel_size=2, stride=2))\nblk = transition_block(23, 10)\nblk(Y).shape\nb1 = nn.Sequential(\n nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nblks = []\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n blks.append(DenseBlock(num_convs, num_channels, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n blks.append(transition_block(num_channels, num_channels // 2))\n num_channels = num_channels // 2\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2D(num_channels), nn.ReLU(),\n nn.AdaptiveMaxPool2D((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))":4,"import warningsfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef corr2d(X, K):\n h, w = K.shape\n Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 
5.0], [6.0, 7.0, 8.0]])\nK = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Layer):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = paddle.ParamAttr(paddle.rand(kernel_size))\n self.bias = paddle.ParamAttr(paddle.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = paddle.ones((6, 8))\nX[:, 2:6] = 0\nK = paddle.to_tensor([[1.0, -1.0]])\ncorr2d(X.t(), K)\nconv2d = nn.Conv2D(1, 1, kernel_size=(1, 2))\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.clear_gradients()\n l.sum().backward()\n with paddle.no_grad():\n conv2d.weight[:] -= lr * conv2d.weight.grad\nconv2d.weight.reshape((1, 2))":4,"%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import paddle as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1":6,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nclass CenteredLayer(nn.Layer):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(paddle.rand([4, 8]))\nY.mean()\nclass MyLinear(nn.Layer):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32')\n self.bias = paddle.create_parameter(shape=(units,), dtype='float32')\n def forward(self, X):\n linear = paddle.matmul(X, self.weight) + self.bias\n return F.relu(linear)\nlinear(paddle.randn([2, 5]))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(paddle.rand([2, 64]))":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return paddle.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = paddle.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = paddle.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = paddle.normal(0, 1, (3, 3, 3))\nK = paddle.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(paddle.abs(Y1 - Y2).sum()) < 1e-6":6,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ndef synthetic_data(w, b, num_examples):\n X = paddle.normal(0, 1, (num_examples, len(w)))\n y = paddle.matmul(X, w) + b\n y += paddle.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, 
num_examples, batch_size):\n batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n break\nw = paddle.normal(0, 0.01, shape=(2,1))\nb = paddle.zeros(shape=[1])\nw.stop_gradient = False\nb.stop_gradient = False\ndef linreg(X, w, b):\n return paddle.matmul(X, w) + b\n with paddle.no_grad():\n for i, param in enumerate(params):\n param -= lr * params[i].grad / batch_size\n params[i].set_value(param)\n params[i].clear_gradient()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with paddle.no_grad():\n train_l = loss(net(features, w, b), labels)":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2D(kernel_size=2, stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(*conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True):\n if not is_training:\n X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = paddle.mean(X)\n var = paddle.mean(((X - mean) ** 2))\n else:\n mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)\n var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / (var + eps) ** 0.5\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Layer):\n def __init__(self, num_features, num_dims=4):\n super(BatchNorm, self).__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32')))\n self.beta = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32')))\n self.moving_mean = paddle.zeros(shape=shape, dtype='float32')\n self.moving_var = paddle.zeros(shape=shape, dtype='float32')\n def forward(self, X):\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9, 
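The linear-regression-from-scratch cell above calls squared_loss and sgd, but the def lines of those helpers were lost in extraction (only the sgd body survives after linreg). A sketch of the missing definitions, consistent with how they are used in the training loop:

def squared_loss(y_hat, y):
    # squared error per example, without averaging
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2

def sgd(params, lr, batch_size):
    # minibatch stochastic gradient descent, matching the orphaned body above
    with paddle.no_grad():
        for i, param in enumerate(params):
            param -= lr * params[i].grad / batch_size
            params[i].set_value(param)
            params[i].clear_gradient()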
is_training=self.training)\n return Y\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\nparam = net.parameters()\nprint('gamma:', param[2].numpy().reshape(-1))\nprint('beta:', param[3].numpy().reshape(-1))\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(),\n nn.Linear(84, 10))":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = paddle.randn([num_inputs, num_hiddens]) * 0.01\nW1.stop_gradient = False\nb1 = paddle.zeros([num_hiddens])\nb1.stop_gradient = False\nW2 = paddle.randn([num_hiddens, num_outputs]) * 0.01\nW2.stop_gradient = False\nb2 = paddle.zeros([num_outputs])\nb2.stop_gradient = False\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = paddle.zeros_like(X)\n return paddle.maximum(X, a)\nnum_epochs, lr = 10, 0.1\nupdater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)":2,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport random\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return paddle.zeros_like(X)\n if dropout == 0:\n return X\n mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')\n return mask * X / (1.0 - dropout)\nX= paddle.arange(16, dtype = paddle.float32).reshape((2, 8))\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Layer):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,\n is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = 
paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10, weight_attr=weight_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)":2,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(-8.0, 8.0, 0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.relu(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = paddle.nn.functional.sigmoid(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = paddle.tanh(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))":6,"counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1))\ncum_counts = counts.cumsum(axis=0)\ncum_counts = cum_counts.squeeze(axis=1)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdim=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i],\n label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend()\nimport warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nhelp(paddle.ones)\npaddle.ones([4], dtype='float32')":6,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom IPython import display\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))\nb = paddle.zeros(shape=(num_outputs,))\nW.stop_gradient=False\nb.stop_gradient=False\nX = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = paddle.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = paddle.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = paddle.to_tensor([0, 2])\ny_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n if len(y_hat.shape) < 
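The scratch dropout cell above builds Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2) without setting those sizes in this copy; the duplicate of the same cell later in this dump uses the following values (784 pixels in, 10 classes out, two 256-unit hidden layers), so presumably:

num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256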
len(y.shape):\n cmp = y_hat.astype(y.dtype) == y.squeeze()\n else:\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, paddle.nn.Layer):\n net.eval()\n metric = Accumulator(2)\n with paddle.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, paddle.nn.Layer):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nfrom paddle.nn import functional as F\nclass Residual(nn.Layer):\n def __init__(self, input_channels, num_channels, use_1x1conv=False,\n strides=1):\n super(Residual, self).__init__()\n self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2D(num_channels)\n self.bn2 = nn.BatchNorm2D(num_channels)\n self.relu = nn.ReLU()\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3, 3)\nX = paddle.rand([4, 3, 6, 6])\nY = blk(X)\nY.shape\nblk = Residual(3, 6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport paddle\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = paddle.io.TensorDataset(data_arrays)\n return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01))\nbias_attr = paddle.ParamAttr(initializer=None)\nnet = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr))\nloss = nn.MSELoss()\ntrainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters())\nw = net[0].weight\nb = net[0].bias":4,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport random\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= 
dropout <= 1\n if dropout == 1:\n return paddle.zeros_like(X)\n if dropout == 0:\n return X\n mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')\n return mask * X / (1.0 - dropout)\nX= paddle.arange(16, dtype = paddle.float32).reshape((2, 8))\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Layer):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,\n is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10, weight_attr=weight_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn, optimizer\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = paddle.rand((1, 1, 28, 28), 'float32')\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2D:\n nn.initializer.XavierUniform(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.clear_grad()\n X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with paddle.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 
1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))":6,"%matplotlib inline\nimport warnings\nimport numpy as np\nimport pandas as pd\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\nn_train = train_data.shape[0]\ntrain_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32)\ntest_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32)\ntrain_labels = paddle.to_tensor(\n train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32)\ndef log_rmse(net, features, labels):\n clipped_preds = paddle.clip(net(features), 1, float('inf'))\n rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n optimizer.clear_grad()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = paddle.concat([X_train, X_part], 0)\n y_train = paddle.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid":2,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = paddle.rand([2, 20])\nnet(X)\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nclass MySequential(nn.Layer):\n def __init__(self, *layers):\n super(MySequential, self).__init__()\n if len(layers) > 0 and isinstance(layers[0], tuple):\n for name, layer in layers:\n self.add_sublayer(name, layer)\n else:\n for idx, layer in enumerate(layers):\n self.add_sublayer(str(idx), layer)\n def forward(self, X):\n for layer in self._sub_layers.values():\n X = layer(X)\n return X\nclass FixedHiddenMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.rand_weight = paddle.rand([20, 20])\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nclass NestMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),\n nn.Linear(64, 32), 
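train_ch6 in the LeNet cell above finishes by calling evaluate_accuracy_gpu, which is not defined anywhere in this dump. A minimal sketch of such a helper, assuming the d2l.Accumulator and d2l.accuracy utilities already used in the surrounding cells:

def evaluate_accuracy_gpu(net, data_iter, device=None):
    # compute accuracy over a dataset without tracking gradients
    net.eval()
    metric = d2l.Accumulator(2)  # (number correct, number seen)
    with paddle.no_grad():
        for X, y in data_iter:
            metric.add(d2l.accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]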
nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)":2,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\n\ndef comp_conv2d(conv2d, X):\n X = paddle.reshape(X, [1, 1] + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1)\nX = paddle.rand((8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape":6,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nx = paddle.arange(4, dtype='float32')\nx = paddle.to_tensor(x, stop_gradient=False)\ny = 2 * paddle.dot(x, x)\nx.clear_gradient()\ny = paddle.sum(x)\ny.backward()\nx.grad\nx.clear_gradient()\ny = x * x\npaddle.sum(y).backward()\nx.grad\nx.clear_gradient()\ny = x * x\nu = y.detach()\nz = u * x\npaddle.sum(z).backward()\nx.grad == u\nx.clear_gradient()\npaddle.sum(y).backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while paddle.norm(b) < 1000:\n b = b * 2\n if paddle.sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)\nd = f(a)\nd.backward()":4,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ndef synthetic_data(w, b, num_examples):\n X = paddle.normal(0, 1, (num_examples, len(w)))\n y = paddle.matmul(X, w) + b\n y += paddle.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n break\nw = paddle.normal(0, 0.01, shape=(2,1))\nb = paddle.zeros(shape=[1])\nw.stop_gradient = False\nb.stop_gradient = False\ndef linreg(X, w, b):\n return paddle.matmul(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2\n with paddle.no_grad():\n for i, param in enumerate(params):\n param -= lr * params[i].grad / batch_size\n params[i].set_value(param)\n params[i].clear_gradient()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with paddle.no_grad():\n train_l = loss(net(features, w, b), labels)":2,"trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport warnings\nfrom 
d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.sigmoid(x)\ny.backward(paddle.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],\n legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = paddle.normal(0, 1, shape=(4,4))\nfor i in range(100):\n M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4)))":6,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport numpy as np\nimport paddle\nfair_probs = [1.0 / 6] * 6\npaddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample()\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000":6,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.Normal(m.weight, std=0.01)\nnet.apply(init_weights);\ntrainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as Function\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), paddle.zeros([batch_size, num_hiddens]))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * paddle.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)":6,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as 
nn\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2D(kernel_size=2, stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(*conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = vgg(small_conv_arch)":2,"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nT = 1000\ntime = paddle.arange(1, T + 1, dtype=paddle.float32)\nx = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = paddle.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.XavierUniform(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters())\n for epoch in range(epochs):\n for i,(X, y) in enumerate (train_iter()):\n trainer.clear_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = paddle.zeros([T])\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = paddle.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape([-1])\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nfrom paddle.nn import functional as F\nclass Residual(nn.Layer):\n def 
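Both VGG cells in this dump call vgg(conv_arch) without defining conv_arch. Given the five pooling blocks, the doubling channel widths, and the out_channels * 7 * 7 flatten for 224x224 inputs, the usual VGG-11 configuration is presumably intended:

conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))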
__init__(self, input_channels, num_channels, use_1x1conv=False,\n strides=1):\n super(Residual, self).__init__()\n self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2D(num_channels)\n self.bn2 = nn.BatchNorm2D(num_channels)\n self.relu = nn.ReLU()\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3, 3)\nX = paddle.rand([4, 3, 6, 6])\nY = blk(X)\nY.shape\nblk = Residual(3, 6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\ndef resnet_block(input_channels, num_channels, num_residuals, first_block=False):\n blk = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))\n else:\n blk.append(Residual(num_channels, num_channels))\n return blk\nb2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))\nb3 = nn.Sequential(*resnet_block(64, 128, 2))\nb4 = nn.Sequential(*resnet_block(128, 256, 2))\nb5 = nn.Sequential(*resnet_block(256, 512, 2))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)":4,"x = paddle.arange(12)\nx.numel()\nX = paddle.reshape(x, (3, 4))\npaddle.zeros((2, 3, 4))\npaddle.ones((2, 3, 4))\npaddle.randn((3, 4),'float32')\npaddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = paddle.to_tensor([1.0, 2, 4, 8])\ny = paddle.to_tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x**y\npaddle.exp(x)\nX = paddle.arange(12, dtype='float32').reshape((3, 4))\nY = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\npaddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)\nX.sum()\na = paddle.reshape(paddle.arange(3), (3, 1))\nb = paddle.reshape(paddle.arange(2), (1, 2))\nX[1, 2] = 9\nX[0:2, :] = 12\nZ = paddle.zeros_like(Y)\nZ = X + Y\nbefore = id(X)\nX += Y\nid(X) == before\nA = X.numpy()\nB = paddle.to_tensor(A)\ntype(A), type(B)\na = paddle.to_tensor([3.5])\na, a.item(), float(a), int(a)":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport paddle\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = paddle.io.TensorDataset(data_arrays)\n return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01))\nbias_attr = paddle.ParamAttr(initializer=None)\nnet = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters())\nw = net[0].weight\nb = net[0].bias":2,"%matplotlib inline\nimport warnings\nfrom d2l 
import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport sys\nimport paddle\nfrom paddle.vision import transforms\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\nmnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\nlen(mnist_train), len(mnist_test)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if paddle.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n return 4\ntrain_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers())\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\n mnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\n return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()),\n paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers()))":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = paddle.arange(16, dtype=\"float32\").reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3, stride=3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3))\npool2d(X)\nX = paddle.concat((X, X + 1), 1)\npool2d = paddle.nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)":6,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nX, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4))\nH, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4))\npaddle.matmul(X, W_xh) + paddle.matmul(H, W_hh)\npaddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))":6,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nX, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)":6,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True):\n if not is_training:\n X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5\n else:\n assert len(X.shape) 
in (2, 4)\n if len(X.shape) == 2:\n mean = paddle.mean(X)\n var = paddle.mean(((X - mean) ** 2))\n else:\n mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)\n var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / (var + eps) ** 0.5\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Layer):\n def __init__(self, num_features, num_dims=4):\n super(BatchNorm, self).__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32')))\n self.beta = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32')))\n self.moving_mean = paddle.zeros(shape=shape, dtype='float32')\n self.moving_var = paddle.zeros(shape=shape, dtype='float32')\n def forward(self, X):\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training)\n return Y\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nparam = net.parameters()\nprint('gamma:', param[2].numpy().reshape(-1))\nprint('beta:', param[3].numpy().reshape(-1))\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(),\n nn.Linear(84, 10))":4,"x = paddle.arange(12)\nX = paddle.reshape(x, (3, 4))\npaddle.zeros((2, 3, 4))\npaddle.ones((2, 3, 4))\npaddle.randn((3, 4),'float32')\npaddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = paddle.to_tensor([1.0, 2, 4, 8])\ny = paddle.to_tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x**y\npaddle.exp(x)\nX = paddle.arange(12, dtype='float32').reshape((3, 4))\nY = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\npaddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)\na = paddle.reshape(paddle.arange(3), (3, 1))\nb = paddle.reshape(paddle.arange(2), (1, 2))\nZ = paddle.zeros_like(Y)\nZ = X + Y\nA = X.numpy()\nB = paddle.to_tensor(A)\ntype(A), type(B)\na = paddle.to_tensor([3.5])\na, a.item(), float(a), int(a)":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n 
nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(3, stride=2), nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)":6,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = paddle.rand([2, 20])\nnet(X)\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nnet = MLP()\nnet(X)\nclass MySequential(nn.Layer):\n def __init__(self, *layers):\n super(MySequential, self).__init__()\n if len(layers) > 0 and isinstance(layers[0], tuple):\n for name, layer in layers:\n self.add_sublayer(name, layer)\n else:\n for idx, layer in enumerate(layers):\n self.add_sublayer(str(idx), layer)\n def forward(self, X):\n for layer in self._sub_layers.values():\n X = layer(X)\n return X\nnet = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nnet(X)\nclass FixedHiddenMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.rand_weight = paddle.rand([20, 20])\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nnet = FixedHiddenMLP()\nnet(X)\nclass NestMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),\n nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)":2,"import paddle\nfrom paddle import nn\npaddle.device.set_device(\"cpu\"), paddle.CUDAPlace(0), paddle.CUDAPlace(1)\npaddle.device.cuda.device_count()\n if paddle.device.cuda.device_count() >= i + 1:\n return paddle.CUDAPlace(i)\n return paddle.CPUPlace()\ndef try_all_gpus():\n devices = [paddle.CUDAPlace(i) for i in range(paddle.device.cuda.device_count())]\n return devices if devices else paddle.CPUPlace()\ntry_gpu(),try_gpu(10),try_all_gpus()\nx = paddle.to_tensor([1, 2, 3])\nx.place\nX = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu())\nY = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1))\nZ = X.cuda(1)\nZ.cuda(1) is Z\nnet = nn.Sequential(nn.Linear(3, 1))\nnet=net.to(try_gpu())\nnet[0].weight.place":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom IPython import display\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))\nb = paddle.zeros(shape=(num_outputs,))\nW.stop_gradient=False\nb.stop_gradient=False\nX = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = paddle.exp(X)\n partition = 
X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = paddle.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = paddle.to_tensor([0, 2])\ny_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n if len(y_hat.shape) < len(y.shape):\n cmp = y_hat.astype(y.dtype) == y.squeeze()\n else:\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, paddle.nn.Layer):\n net.eval()\n metric = Accumulator(2)\n with paddle.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, paddle.nn.Layer):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]\nlr = 0.1\ndef updater(batch_size):\n return d2l.sgd([W, b], lr, batch_size)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])\npredict_ch3(net, test_iter)":2,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nx = paddle.to_tensor([3.0])\ny = paddle.to_tensor([2.0])\nx + y, x * y, x / y, x**y\nx = paddle.arange(4)\nA = paddle.reshape(paddle.arange(20), (5, 4))\npaddle.transpose(A, perm=[1, 0])\nB = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == paddle.transpose(B, perm=[1, 0])\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\nA = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))\nB = A.clone()\nA, A + B\na = 2\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\na + X, (a * X).shape\nx = paddle.arange(4, dtype=paddle.float32)\nprint(x, x.sum())\nA.shape, A.sum()\nA_sum_axis0 = A.sum(axis=0)\nA_sum_axis1 = A.sum(axis=1)\nA.sum(axis=[0, 1])\nA.mean(), A.sum() / A.numel()\nA.mean(axis=0), A.sum(axis=0) / A.shape[0]\nsum_A = paddle.sum(A, axis=1, keepdim=True)\nA.cumsum(axis=0)\ny = paddle.ones(shape=[4], dtype='float32')\nx, y, paddle.dot(x, y)\npaddle.sum(x * y)\nA.shape, x.shape, paddle.mv(A, x)\nB = paddle.ones(shape=[4, 3], dtype='float32')\npaddle.mm(A, B)\nu = paddle.to_tensor([3.0, -4.0])\npaddle.norm(u)\npaddle.abs(u).sum()\npaddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nclass Inception(nn.Layer):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1)\n self.p2_1 = 
nn.Conv2D(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2D(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return paddle.concat(x=[p1, p2, p3, p4], axis=1)\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2,padding=1))\nb2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = paddle.rand(shape=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)":6,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = paddle.randn([num_inputs, num_hiddens]) * 0.01\nW1.stop_gradient = False\nb1 = paddle.zeros([num_hiddens])\nb1.stop_gradient = False\nW2 = paddle.randn([num_hiddens, num_outputs]) * 0.01\nW2.stop_gradient = False\nb2 = paddle.zeros([num_outputs])\nb2.stop_gradient = False\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = paddle.zeros_like(X)\n return paddle.maximum(X, a)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(X@W1 + b1)\n return (H@W2 + b2)\nloss = nn.CrossEntropyLoss(reduction='none')\nnum_epochs, lr = 10, 0.1\nupdater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)":4,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport numpy as np\nimport paddle\nfrom paddle import nn\ntrue_w, features, poly_features, labels = [paddle.to_tensor(x, dtype=\n paddle.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n out = net(X)\n y = y.reshape(out.shape)\n l = loss(out, y)\n metric.add(l.sum(), l.numel())\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels,\n num_epochs=400):\n loss = nn.MSELoss()\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = 
d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False)\n trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2],\n labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :],\n labels[:n_train], labels[n_train:], num_epochs=1500)":4,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nx = paddle.arange(4, dtype='float32')\nx = paddle.to_tensor(x, stop_gradient=False)\ny = 2 * paddle.dot(x, x)\ny.backward()\nx.grad\nx.grad == 4 * x\nx.clear_gradient()\ny = paddle.sum(x)\ny.backward()\nx.grad\nx.clear_gradient()\ny = x * x\npaddle.sum(y).backward()\nx.grad\nx.clear_gradient()\ny = x * x\nu = y.detach()\nz = u * x\npaddle.sum(z).backward()\nx.grad == u\nx.clear_gradient()\npaddle.sum(y).backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while paddle.norm(b) < 1000:\n b = b * 2\n if paddle.sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)\nd = f(a)\nd.backward()\na.grad == d / a":2,"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True)\nstate = paddle.zeros(shape=[1, batch_size, num_hiddens])\nstate.shape\nX = paddle.rand(shape=[num_steps, batch_size, len(vocab)])\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if self.rnn.num_directions==1:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens])\n else:\n return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]),\n paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1.0\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)":6,"import collections\nimport re\nfrom d2l import paddle as d2l\ndef tokenize(lines, token='word'):\n if token 
== 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('Error: Unknown word element type:' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)":2,"%matplotlib inline\nimport warnings\nimport numpy as np\nimport pandas as pd\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\nn_train = train_data.shape[0]\ntrain_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32)\ntest_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32)\ntrain_labels = paddle.to_tensor(\n train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32)\nloss = nn.MSELoss()\nin_features = train_features.shape[1]\ndef get_net():\n net = nn.Sequential(nn.Linear(in_features,1))\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = paddle.clip(net(features), 1, float('inf'))\n rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n optimizer.clear_grad()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = paddle.concat([X_train, X_part], 0)\n y_train = paddle.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).detach().numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)":4,"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nclass 
CenteredLayer(nn.Layer):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(paddle.rand([4, 8]))\nY.mean()\nclass MyLinear(nn.Layer):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32')\n self.bias = paddle.create_parameter(shape=(units,), dtype='float32')\n def forward(self, X):\n linear = paddle.matmul(X, self.weight) + self.bias\n return F.relu(linear)\nlinear = MyLinear(5, 3)\nlinear.weight\nlinear(paddle.randn([2, 5]))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(paddle.rand([2, 64]))":4}}}}],"rows":[{"rowIdx":0,"cells":{"id":{"kind":"number","value":1,"string":"1"},"tensorflow":{"kind":"string","value":"x = tf.range(12)\ntf.size(x)\nX = tf.reshape(x, (3, 4))\ntf.zeros((2, 3, 4))\ntf.ones((2, 3, 4))\ntf.random.normal(shape=[3, 4])\ntf.constant([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = tf.constant([1.0, 2, 4, 8])\ny = tf.constant([2.0, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\ntf.exp(x)\nX = tf.reshape(tf.range(12, dtype=tf.float32), (3, 4))\nY = tf.constant([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\ntf.concat([X, Y], axis=0), tf.concat([X, Y], axis=1)\ntf.reduce_sum(X)\na = tf.reshape(tf.range(3), (3, 1))\nb = tf.reshape(tf.range(2), (1, 2))\nX_var = tf.Variable(X)\nX_var[1, 2].assign(9)\nX_var = tf.Variable(X)\nX_var[0:2, :].assign(tf.ones(X_var[0:2,:].shape, dtype = tf.float32) * 12)\nZ = tf.Variable(tf.zeros_like(Y))\nZ.assign(X + Y)\n@tf.function\ndef computation(X, Y):\n Z = tf.zeros_like(Y)\n A = X + Y\n B = A + Y\n C = B + Y\n return C + Y\ncomputation(X, Y)\nA = X.numpy()\nB = tf.constant(A)\na = tf.constant([3.5]).numpy()\nprint(a, a.item(), float(a), int(a))"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"x = paddle.arange(12)\nx.numel()\nX = paddle.reshape(x, (3, 4))\npaddle.zeros((2, 3, 4))\npaddle.ones((2, 3, 4))\npaddle.randn((3, 4),'float32')\npaddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = paddle.to_tensor([1.0, 2, 4, 8])\ny = paddle.to_tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x**y\npaddle.exp(x)\nX = paddle.arange(12, dtype='float32').reshape((3, 4))\nY = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\npaddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)\nX.sum()\na = paddle.reshape(paddle.arange(3), (3, 1))\nb = paddle.reshape(paddle.arange(2), (1, 2))\nX[1, 2] = 9\nX[0:2, :] = 12\nZ = paddle.zeros_like(Y)\nZ = X + Y\nbefore = id(X)\nX += Y\nid(X) == before\nA = X.numpy()\nB = paddle.to_tensor(A)\ntype(A), type(B)\na = paddle.to_tensor([3.5])\na, a.item(), float(a), int(a)"}}},{"rowIdx":1,"cells":{"id":{"kind":"number","value":2,"string":"2"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nX, y = tf.constant(inputs.values), tf.constant(outputs.values)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nX, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)"}}},{"rowIdx":2,"cells":{"id":{"kind":"number","value":3,"string":"3"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nx = tf.constant(3.0)\ny = tf.constant(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = tf.range(4)\nA = tf.reshape(tf.range(20), (5, 4))\ntf.transpose(A)\nB = tf.constant([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == 
tf.transpose(B)\nX = tf.reshape(tf.range(24), (2, 3, 4))\nA = tf.reshape(tf.range(20, dtype=tf.float32), (5, 4))\nB = A\nprint(A, A + B)\na = 2\nX = tf.reshape(tf.range(24), (2, 3, 4))\nprint(a + X, (a * X).shape)\nx = tf.range(4, dtype=tf.float32)\nprint(x, tf.reduce_sum(x))\na = tf.reduce_sum(A)\nA_sum_axis0 = tf.reduce_sum(A, axis=0)\nA_sum_axis1 = tf.reduce_sum(A, axis=1)\ntf.reduce_sum(A, axis=[0, 1])\ntf.reduce_mean(A)\ntf.reduce_sum(A) / tf.size(A).numpy()\ntf.reduce_mean(A, axis=0)\ntf.reduce_sum(A, axis=0) / A.shape[0]\nsum_A = tf.reduce_sum(A, axis=1, keepdims=True)\ntf.cumsum(A, axis=0)\ny = tf.ones(4, dtype=tf.float32)\nprint(tf.tensordot(x, y, axes=1))\ntf.reduce_sum(x * y)\nA.shape, x.shape, tf.linalg.matvec(A, x)\nB = tf.ones((4, 3), tf.float32)\ntf.matmul(A, B)\nu = tf.constant([3.0, -4.0])\ntf.norm(u)\ntf.reduce_sum(tf.abs(u))\ntf.norm(tf.ones((4, 9)))"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nx = paddle.to_tensor([3.0])\ny = paddle.to_tensor([2.0])\nx + y, x * y, x / y, x**y\nx = paddle.arange(4)\nA = paddle.reshape(paddle.arange(20), (5, 4))\npaddle.transpose(A, perm=[1, 0])\nB = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == paddle.transpose(B, perm=[1, 0])\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\nA = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))\nB = A.clone()\nA, A + B\na = 2\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\na + X, (a * X).shape\nx = paddle.arange(4, dtype=paddle.float32)\nprint(x, x.sum())\nA.shape, A.sum()\nA_sum_axis0 = A.sum(axis=0)\nA_sum_axis1 = A.sum(axis=1)\nA.sum(axis=[0, 1])\nA.mean(), A.sum() / A.numel()\nA.mean(axis=0), A.sum(axis=0) / A.shape[0]\nsum_A = paddle.sum(A, axis=1, keepdim=True)\nA.cumsum(axis=0)\ny = paddle.ones(shape=[4], dtype='float32')\nx, y, paddle.dot(x, y)\npaddle.sum(x * y)\nA.shape, x.shape, paddle.mv(A, x)\nB = paddle.ones(shape=[4, 3], dtype='float32')\npaddle.mm(A, B)\nu = paddle.to_tensor([3.0, -4.0])\npaddle.norm(u)\npaddle.abs(u).sum()\npaddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))"}}},{"rowIdx":3,"cells":{"id":{"kind":"number","value":4,"string":"4"},"tensorflow":{"kind":"string","value":"%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import tensorflow as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import paddle as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1"}}},{"rowIdx":4,"cells":{"id":{"kind":"number","value":5,"string":"5"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nx = tf.range(4, dtype=tf.float32)\nx = tf.Variable(x)\nwith tf.GradientTape() as t:\n y = 2 * tf.tensordot(x, x, axes=1)\nx_grad = t.gradient(y, x)\nx_grad\nx_grad == 4 * x\nwith tf.GradientTape() as t:\n y = tf.reduce_sum(x)\nt.gradient(y, x)\nwith tf.GradientTape() as t:\n y = x * x\nt.gradient(y, x)\nwith 
tf.GradientTape(persistent=True) as t:\n y = x * x\n u = tf.stop_gradient(y)\n z = u * x\nx_grad = t.gradient(z, x)\nx_grad == u\nt.gradient(y, x) == 2 * x\ndef f(a):\n b = a * 2\n while tf.norm(b) < 1000:\n b = b * 2\n if tf.reduce_sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = tf.Variable(tf.random.normal(shape=()))\nwith tf.GradientTape() as t:\n d = f(a)\nd_grad = t.gradient(d, a)\nd_grad\nd_grad == d / a"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nx = paddle.arange(4, dtype='float32')\nx = paddle.to_tensor(x, stop_gradient=False)\ny = 2 * paddle.dot(x, x)\ny.backward()\nx.grad\nx.grad == 4 * x\nx.clear_gradient()\ny = paddle.sum(x)\ny.backward()\nx.grad\nx.clear_gradient()\ny = x * x\npaddle.sum(y).backward()\nx.grad\nx.clear_gradient()\ny = x * x\nu = y.detach()\nz = u * x\npaddle.sum(z).backward()\nx.grad == u\nx.clear_gradient()\npaddle.sum(y).backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while paddle.norm(b) < 1000:\n b = b * 2\n if paddle.sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)\nd = f(a)\nd.backward()\na.grad == d / a"}}},{"rowIdx":5,"cells":{"id":{"kind":"number","value":6,"string":"6"},"tensorflow":{"kind":"string","value":"%matplotlib inline\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom d2l import tensorflow as d2l\nfair_probs = tf.ones(6) / 6\ntfp.distributions.Multinomial(1, fair_probs).sample()\ntfp.distributions.Multinomial(10, fair_probs).sample()\ncounts = tfp.distributions.Multinomial(1000, fair_probs).sample()"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport numpy as np\nimport paddle\nfair_probs = [1.0 / 6] * 6\npaddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample()\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000"}}},{"rowIdx":6,"cells":{"id":{"kind":"number","value":7,"string":"7"},"tensorflow":{"kind":"string","value":"counts = tfp.distributions.Multinomial(10, fair_probs).sample(500)\ncum_counts = tf.cumsum(counts, axis=0)\nestimates = cum_counts / tf.reduce_sum(cum_counts, axis=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].numpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nimport tensorflow as tf\na = dir(tf.random)\nhelp(tf.ones)\ntf.ones(4)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1))\ncum_counts = counts.cumsum(axis=0)\ncum_counts = cum_counts.squeeze(axis=1)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdim=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i],\n label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of 
experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend()\nimport warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nhelp(paddle.ones)\npaddle.ones([4], dtype='float32')"}}},{"rowIdx":7,"cells":{"id":{"kind":"number","value":8,"string":"8"},"tensorflow":{"kind":"string","value":"%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn = 10000\na = tf.ones(n)\nb = tf.ones(n)\nc = tf.Variable(tf.zeros(n))\ntimer = Timer()\nfor i in range(n):\n c[i].assign(a[i] + b[i])\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport time\nimport numpy as np\nimport paddle\nn = 10000\na = paddle.ones([n])\nb = paddle.ones([n])\nc = paddle.zeros([n])\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',\n ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"}}},{"rowIdx":8,"cells":{"id":{"kind":"number","value":9,"string":"9"},"tensorflow":{"kind":"string","value":"%matplotlib inline\nimport random\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef synthetic_data(w, b, num_examples):\n X = tf.zeros((num_examples, w.shape[0]))\n X += tf.random.normal(shape=X.shape)\n y = tf.matmul(X, tf.reshape(w, (-1, 1))) + b\n y += tf.random.normal(shape=y.shape, stddev=0.01)\n y = tf.reshape(y, (-1, 1))\n return X, y\ntrue_w = tf.constant([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].numpy(), labels.numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n j = tf.constant(indices[i: min(i + batch_size, num_examples)])\n yield tf.gather(features, j), tf.gather(labels, j)\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n print(X, '\n', y)\n break\nw = tf.Variable(tf.random.normal(shape=(2, 1), mean=0, stddev=0.01), trainable=True)\nb = tf.Variable(tf.zeros(1), trainable=True)\ndef linreg(X, w, b):\n return tf.matmul(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - tf.reshape(y, y_hat.shape)) ** 2 / 2\ndef sgd(params, grads, lr, batch_size):\n for param, grad in zip(params, grads):\n param.assign_sub(lr*grad/batch_size)\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n with tf.GradientTape() as g:\n l = loss(net(X, w, b), y)\n dw, db = g.gradient(l, [w, b])\n sgd([w, b], [dw, db], lr, batch_size)\n train_l = loss(net(features, w, b), labels)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ndef synthetic_data(w, b, num_examples):\n X = 
paddle.normal(0, 1, (num_examples, len(w)))\n y = paddle.matmul(X, w) + b\n y += paddle.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n break\nw = paddle.normal(0, 0.01, shape=(2,1))\nb = paddle.zeros(shape=[1])\nw.stop_gradient = False\nb.stop_gradient = False\ndef linreg(X, w, b):\n return paddle.matmul(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2\ndef sgd(params, lr, batch_size):\n with paddle.no_grad():\n for i, param in enumerate(params):\n param -= lr * params[i].grad / batch_size\n params[i].set_value(param)\n params[i].clear_gradient()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with paddle.no_grad():\n train_l = loss(net(features, w, b), labels)"}}},{"rowIdx":9,"cells":{"id":{"kind":"number","value":10,"string":"10"},"tensorflow":{"kind":"string","value":"import numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntrue_w = tf.constant([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = tf.data.Dataset.from_tensor_slices(data_arrays)\n if is_train:\n dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.batch(batch_size)\n return dataset\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nnet = tf.keras.Sequential()\nnet.add(tf.keras.layers.Dense(1))\ninitializer = tf.initializers.RandomNormal(stddev=0.01)\nnet = tf.keras.Sequential()\nnet.add(tf.keras.layers.Dense(1, kernel_initializer=initializer))\nloss = tf.keras.losses.MeanSquaredError()\ntrainer = tf.keras.optimizers.SGD(learning_rate=0.03)\nw = net.get_weights()[0]\nb = net.get_weights()[1]"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport paddle\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = paddle.io.TensorDataset(data_arrays)\n return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01))\nbias_attr = paddle.ParamAttr(initializer=None)\nnet = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr))\nloss = nn.MSELoss()\ntrainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters())\nw = net[0].weight\nb = 
net[0].bias"}}},{"rowIdx":10,"cells":{"id":{"kind":"number","value":11,"string":"11"},"tensorflow":{"kind":"string","value":"%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nd2l.use_svg_display()\nmnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()\nlen(mnist_train[0]), len(mnist_test[0])\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.numpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX = tf.constant(mnist_train[0][:18])\ny = tf.constant(mnist_train[1][:18])\nshow_images(X, 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ntrain_iter = tf.data.Dataset.from_tensor_slices(mnist_train).batch(batch_size).shuffle(len(mnist_train[0]))\ndef load_data_fashion_mnist(batch_size, resize=None):\n mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()\n process = lambda X, y: (tf.expand_dims(X, axis=3) / 255, tf.cast(y, dtype='int32'))\n resize_fn = lambda X, y: (tf.image.resize_with_pad(X, resize, resize) if resize else X, y)\n return (tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(batch_size).shuffle(len(mnist_train[0])).map(resize_fn),\n tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(batch_size).map(resize_fn))"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport sys\nimport paddle\nfrom paddle.vision import transforms\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\nmnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\nlen(mnist_train), len(mnist_test)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if paddle.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ndef get_dataloader_workers():\n return 4\ntrain_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\n mnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\n return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()),\n paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, 
num_workers=get_dataloader_workers()))"}}},{"rowIdx":11,"cells":{"id":{"kind":"number","value":12,"string":"12"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom IPython import display\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs), mean=0, stddev=0.01))\nb = tf.Variable(tf.zeros(num_outputs))\nX = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\ntf.reduce_sum(X, 0, keepdims=True), tf.reduce_sum(X, 1, keepdims=True)\ndef softmax(X):\n X_exp = tf.exp(X)\n partition = tf.reduce_sum(X_exp, 1, keepdims=True)\n return X_exp / partition\nX = tf.random.normal((2, 5), 0, 1)\nX_prob = softmax(X)\nX_prob, tf.reduce_sum(X_prob, 1)\ndef net(X):\n return softmax(tf.matmul(tf.reshape(X, (-1, W.shape[0])), W) + b)\ny_hat = tf.constant([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny = tf.constant([0, 2])\ntf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1]))\ndef cross_entropy(y_hat, y):\n return -tf.math.log(tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1])))\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = tf.argmax(y_hat, axis=1)\n cmp = tf.cast(y_hat, y.dtype) == y\n return float(tf.reduce_sum(tf.cast(cmp, y.dtype)))\ndef evaluate_accuracy(net, data_iter):\n metric = Accumulator(2)\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n metric = Accumulator(3)\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n y_hat = net(X)\n if isinstance(loss, tf.keras.losses.Loss):\n l = loss(y, y_hat)\n else:\n l = loss(y_hat, y)\n if isinstance(updater, tf.keras.optimizers.Optimizer):\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n updater.apply_gradients(zip(grads, params))\n else:\n updater(X.shape[0], tape.gradient(l, updater.params))\n l_sum = l * float(tf.size(y)) if isinstance(loss, tf.keras.losses.Loss) else tf.reduce_sum(l)\n metric.add(l_sum, accuracy(y_hat, y), tf.size(y))\n return metric[0] / metric[2], metric[1] / metric[2]\nclass Updater():\n def __init__(self, params, lr):\n self.params = params\n self.lr = lr\n def __call__(self, batch_size, grads):\n d2l.sgd(self.params, grads, self.lr, batch_size)\nupdater = Updater([W, b], lr=0.1)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(tf.argmax(net(X), axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(tf.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n])\npredict_ch3(net, test_iter)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom IPython import display\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))\nb = paddle.zeros(shape=(num_outputs,))\nW.stop_gradient=False\nb.stop_gradient=False\nX = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = paddle.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = paddle.normal(0, 1, (2, 5))\nX_prob = 
softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = paddle.to_tensor([0, 2])\ny_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n if len(y_hat.shape) < len(y.shape):\n cmp = y_hat.astype(y.dtype) == y.squeeze()\n else:\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, paddle.nn.Layer):\n net.eval()\n metric = Accumulator(2)\n with paddle.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, paddle.nn.Layer):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]\nlr = 0.1\ndef updater(batch_size):\n return d2l.sgd([W, b], lr, batch_size)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])\npredict_ch3(net, test_iter)"}}},{"rowIdx":12,"cells":{"id":{"kind":"number","value":13,"string":"13"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = tf.keras.models.Sequential()\nnet.add(tf.keras.layers.Flatten(input_shape=(28, 28)))\nweight_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01)\nnet.add(tf.keras.layers.Dense(10, kernel_initializer=weight_initializer))\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrainer = tf.keras.optimizers.SGD(learning_rate=.1)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.Normal(m.weight, std=0.01)\nnet.apply(init_weights);\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())"}}},{"rowIdx":13,"cells":{"id":{"kind":"number","value":14,"string":"14"},"tensorflow":{"kind":"string","value":"%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nx = tf.Variable(tf.range(-8.0, 8.0, 0.1), dtype=tf.float32)\ny = tf.nn.relu(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.relu(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = 
tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of sigmoid',\n figsize=(5, 2.5))\ny = tf.nn.tanh(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.tanh(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(-8.0, 8.0, 0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.relu(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = paddle.nn.functional.sigmoid(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = paddle.tanh(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))"}}},{"rowIdx":14,"cells":{"id":{"kind":"number","value":15,"string":"15"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = tf.Variable(tf.random.normal(shape=(num_inputs, num_hiddens), mean=0, stddev=0.01))\nb1 = tf.Variable(tf.zeros(num_hiddens))\nW2 = tf.Variable(tf.random.normal(shape=(num_hiddens, num_outputs), mean=0, stddev=0.01))\nb2 = tf.Variable(tf.zeros(num_outputs))\nparams = [W1, b1, W2, b2]\ndef relu(X):\n return tf.math.maximum(X, 0)\ndef net(X):\n X = tf.reshape(X, (-1, num_inputs))\n H = relu(tf.matmul(X, W1) + b1)\n return tf.matmul(H, W2) + b2\ndef loss(y_hat, y):\n return tf.losses.sparse_categorical_crossentropy(y, y_hat, from_logits=True)\nnum_epochs, lr = 10, 0.1\nupdater = d2l.Updater([W1, W2, b1, b2], lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = paddle.randn([num_inputs, num_hiddens]) * 0.01\nW1.stop_gradient = False\nb1 = paddle.zeros([num_hiddens])\nb1.stop_gradient = False\nW2 = paddle.randn([num_hiddens, num_outputs]) * 0.01\nW2.stop_gradient = False\nb2 = paddle.zeros([num_outputs])\nb2.stop_gradient = False\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = paddle.zeros_like(X)\n return paddle.maximum(X, a)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(X@W1 + b1)\n return (H@W2 + b2)\nloss = nn.CrossEntropyLoss(reduction='none')\nnum_epochs, lr = 10, 0.1\nupdater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, 
updater)"}}},{"rowIdx":15,"cells":{"id":{"kind":"number","value":16,"string":"16"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\nnet = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(10)])\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Linear(256, 10))\nfor layer in net:\n if type(layer) == nn.Linear:\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.01))\n layer.weight_attr = weight_attr\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"}}},{"rowIdx":16,"cells":{"id":{"kind":"number","value":17,"string":"17"},"tensorflow":{"kind":"string","value":"import math\nimport numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntrue_w, features, poly_features, labels = [tf.constant(x, dtype=tf.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(tf.reduce_sum(l), d2l.size(l))\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = tf.losses.MeanSquaredError()\n input_shape = train_features.shape[-1]\n net = tf.keras.Sequential()\n net.add(tf.keras.layers.Dense(1, use_bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)\n trainer = tf.keras.optimizers.SGD(learning_rate=.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport numpy as np\nimport paddle\nfrom paddle import nn\ntrue_w, features, poly_features, labels = [paddle.to_tensor(x, dtype=\n paddle.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, 
loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n out = net(X)\n y = y.reshape(out.shape)\n l = loss(out, y)\n metric.add(l.sum(), l.numel())\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels,\n num_epochs=400):\n loss = nn.MSELoss()\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False)\n trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2],\n labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :],\n labels[:n_train], labels[n_train:], num_epochs=1500)"}}},{"rowIdx":17,"cells":{"id":{"kind":"number","value":18,"string":"18"},"tensorflow":{"kind":"string","value":"%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = tf.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = tf.Variable(tf.random.normal(mean=1, shape=(num_inputs, 1)))\n b = tf.Variable(tf.zeros(shape=(1, )))\n return [w, b]\ndef l2_penalty(w):\n return tf.reduce_sum(tf.pow(w, 2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n grads = tape.gradient(l, [w, b])\n d2l.sgd([w, b], grads, lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = tf.keras.models.Sequential()\n net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(wd)))\n net.build(input_shape=(1, num_inputs))\n w, b = net.trainable_variables\n loss = tf.keras.losses.MeanSquaredError()\n num_epochs, lr = 100, 0.003\n trainer = tf.keras.optimizers.SGD(learning_rate=lr)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n l = loss(net(X), y) + net.losses\n grads = tape.gradient(l, net.trainable_variables)\n trainer.apply_gradients(zip(grads, net.trainable_variables))\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, 
loss)))"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = paddle.normal(0, 1, shape=(num_inputs, 1))\n w.stop_gradient = False\n b = paddle.zeros(shape=[1])\n b.stop_gradient = False\n return [w, b]\ndef l2_penalty(w):\n return paddle.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr))\n loss = nn.MSELoss()\n num_epochs, lr = 100, 0.003\n trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n trainer.step()\n trainer.clear_grad()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"}}},{"rowIdx":18,"cells":{"id":{"kind":"number","value":19,"string":"19"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return tf.zeros_like(X)\n if dropout == 0:\n return X\n mask = tf.random.uniform(shape=tf.shape(X), minval=0, maxval=1) < 1 - dropout\n return tf.cast(mask, dtype=tf.float32) * X / (1.0 - dropout)\nX = tf.reshape(tf.range(16, dtype=tf.float32), (2, 8))\nnum_outputs, num_hiddens1, num_hiddens2 = 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(tf.keras.Model):\n def __init__(self, num_outputs, num_hiddens1, num_hiddens2):\n super().__init__()\n self.input_layer = tf.keras.layers.Flatten()\n self.hidden1 = tf.keras.layers.Dense(num_hiddens1, activation='relu')\n self.hidden2 = tf.keras.layers.Dense(num_hiddens2, activation='relu')\n self.output_layer = tf.keras.layers.Dense(num_outputs)\n def call(self, inputs, training=None):\n x = self.input_layer(inputs)\n x = self.hidden1(x)\n if training:\n x = dropout_layer(x, dropout1)\n x = self.hidden2(x)\n if training:\n x = dropout_layer(x, dropout2)\n x = self.output_layer(x)\n return x\nnet = Net(num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = 
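The weight-decay rows add an L2 penalty lambda * ||w||^2 / 2 to the data loss. The penalty and the extra gradient term it contributes can be checked with NumPy (illustrative only):

import numpy as np

def l2_penalty(w):
    return (w ** 2).sum() / 2

w, lambd = np.array([0.5, -1.0, 2.0]), 3.0
print(lambd * l2_penalty(w))  # added to the loss
print(lambd * w)              # the 'weight decay' term added to the gradient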
tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dropout(dropout1),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dropout(dropout2),\n tf.keras.layers.Dense(10),\n])\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport random\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return paddle.zeros_like(X)\n if dropout == 0:\n return X\n mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')\n return mask * X / (1.0 - dropout)\nX= paddle.arange(16, dtype = paddle.float32).reshape((2, 8))\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Layer):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,\n is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10, weight_attr=weight_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"}}},{"rowIdx":19,"cells":{"id":{"kind":"number","value":20,"string":"20"},"tensorflow":{"kind":"string","value":"trainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nx = tf.Variable(tf.range(-8.0, 8.0, 0.1))\nwith tf.GradientTape() as t:\n y = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), [y.numpy(), t.gradient(y, x).numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = tf.random.normal((4, 4))\nfor i in range(100):\n M = tf.matmul(M, 
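The dropout rows implement inverted dropout: zero activations with probability p and rescale the survivors by 1/(1-p) so the expected value is unchanged. The core step in NumPy (illustrative, not the dataset's code):

import numpy as np

def dropout_layer(X, p):
    assert 0 <= p <= 1
    if p == 1:
        return np.zeros_like(X)
    mask = (np.random.rand(*X.shape) > p).astype(X.dtype)
    return mask * X / (1.0 - p)

X = np.arange(16, dtype=float).reshape(2, 8)
print(dropout_layer(X, 0.5))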
tf.random.normal((4, 4)))"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.sigmoid(x)\ny.backward(paddle.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],\n legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = paddle.normal(0, 1, shape=(4,4))\nfor i in range(100):\n M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4)))"}}},{"rowIdx":20,"cells":{"id":{"kind":"number","value":21,"string":"21"},"tensorflow":{"kind":"string","value":"%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn_train = train_data.shape[0]\ntrain_features = tf.constant(all_features[:n_train].values, dtype=tf.float32)\ntest_features = tf.constant(all_features[n_train:].values, dtype=tf.float32)\ntrain_labels = tf.constant(train_data.SalePrice.values.reshape(-1, 1), dtype=tf.float32)\nloss = tf.keras.losses.MeanSquaredError()\ndef get_net():\n net = tf.keras.models.Sequential()\n net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(weight_decay)))\n return net\ndef log_rmse(y_true, y_pred):\n clipped_preds = tf.clip_by_value(y_pred, 1, float('inf'))\n return tf.sqrt(tf.reduce_mean(loss(tf.math.log(y_true), tf.math.log(clipped_preds))))\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n net.compile(loss=loss, optimizer=optimizer)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n y_hat = net(X)\n l = loss(y, y_hat)\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n optimizer.apply_gradients(zip(grads, params))\n train_ls.append(log_rmse(train_labels, net(train_features)))\n if test_labels is not None:\n test_ls.append(log_rmse(test_labels, net(test_features)))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = tf.concat([X_train, X_part], 0)\n y_train = tf.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', 
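The numerical-stability row multiplies a 4x4 matrix by 100 random matrices to show how repeated products explode (or vanish). The same experiment in NumPy (illustrative only):

import numpy as np

M = np.random.normal(size=(4, 4))
for _ in range(100):
    M = M @ np.random.normal(size=(4, 4))
print(np.abs(M).max())  # typically an astronomically large or tiny magnitude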
index=False)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nimport numpy as np\nimport pandas as pd\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\nn_train = train_data.shape[0]\ntrain_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32)\ntest_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32)\ntrain_labels = paddle.to_tensor(\n train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32)\nloss = nn.MSELoss()\nin_features = train_features.shape[1]\ndef get_net():\n net = nn.Sequential(nn.Linear(in_features,1))\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = paddle.clip(net(features), 1, float('inf'))\n rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n optimizer.clear_grad()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = paddle.concat([X_train, X_part], 0)\n y_train = paddle.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).detach().numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)"}}},{"rowIdx":21,"cells":{"id":{"kind":"number","value":22,"string":"22"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dense(10))\nX = tf.random.uniform((2, 20))\nnet(X)\nclass MLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)\n self.out = tf.keras.layers.Dense(units=10)\n def call(self, X):\n return self.out(self.hidden((X)))\nclass MySequential(tf.keras.Model):\n def __init__(self, *args):\n super().__init__()\n self.modules = []\n for block in args:\n self.modules.append(block)\n def call(self, X):\n for 
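The house-price rows score predictions with log RMSE after clipping predictions to at least 1 so the logarithm stays defined. A NumPy sketch of that metric (illustrative; the prices below are made up):

import numpy as np

def log_rmse(preds, labels):
    preds = np.clip(preds, 1, None)
    return np.sqrt(np.mean((np.log(preds) - np.log(labels)) ** 2))

print(log_rmse(np.array([100000.0, 150000.0]), np.array([120000.0, 140000.0])))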
module in self.modules:\n X = module(X)\n return X\nnet = MySequential(\n tf.keras.layers.Dense(units=256, activation=tf.nn.relu),\n tf.keras.layers.Dense(10))\nnet(X)\nclass FixedHiddenMLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.flatten = tf.keras.layers.Flatten()\n self.rand_weight = tf.constant(tf.random.uniform((20, 20)))\n self.dense = tf.keras.layers.Dense(20, activation=tf.nn.relu)\n def call(self, inputs):\n X = self.flatten(inputs)\n X = tf.nn.relu(tf.matmul(X, self.rand_weight) + 1)\n X = self.dense(X)\n while tf.reduce_sum(tf.math.abs(X)) > 1:\n X /= 2\n return tf.reduce_sum(X)\nclass NestMLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.net = tf.keras.Sequential()\n self.net.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))\n self.net.add(tf.keras.layers.Dense(32, activation=tf.nn.relu))\n self.dense = tf.keras.layers.Dense(16, activation=tf.nn.relu)\n def call(self, inputs):\n return self.dense(self.net(inputs))\nchimera = tf.keras.Sequential()\nchimera.add(NestMLP())\nchimera.add(tf.keras.layers.Dense(20))\nchimera.add(FixedHiddenMLP())\nchimera(X)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = paddle.rand([2, 20])\nnet(X)\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nclass MySequential(nn.Layer):\n def __init__(self, *layers):\n super(MySequential, self).__init__()\n if len(layers) > 0 and isinstance(layers[0], tuple):\n for name, layer in layers:\n self.add_sublayer(name, layer)\n else:\n for idx, layer in enumerate(layers):\n self.add_sublayer(str(idx), layer)\n def forward(self, X):\n for layer in self._sub_layers.values():\n X = layer(X)\n return X\nnet = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nnet(X)\nclass FixedHiddenMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.rand_weight = paddle.rand([20, 20])\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nclass NestMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),\n nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)"}}},{"rowIdx":22,"cells":{"id":{"kind":"number","value":23,"string":"23"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu),\n tf.keras.layers.Dense(1),\n])\nX = tf.random.uniform((2, 4))\nnet(X)\nnet.get_weights()[1]\ndef block1(name):\n return tf.keras.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu)], name=name)\ndef block2():\n net = tf.keras.Sequential()\n for i in range(4):\n net.add(block1(name=f'block-{i}'))\n return net\nrgnet = 
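The block rows build Sequential-style containers by hand: keep an ordered collection of sublayers and thread the input through them. Stripped of framework details, the pattern is plain Python (illustrative; MiniSequential is a hypothetical name):

class MiniSequential:
    def __init__(self, *fns):
        self.fns = list(fns)
    def __call__(self, x):
        for fn in self.fns:  # feed each block's output to the next block
            x = fn(x)
        return x

net = MiniSequential(lambda x: x * 2, lambda x: x + 1)
print(net(3))  # (3 * 2) + 1 = 7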
tf.keras.Sequential()\nrgnet.add(block2())\nrgnet.add(tf.keras.layers.Dense(1))\nrgnet(X)\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu,\n kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.01), bias_initializer=tf.zeros_initializer()),\n tf.keras.layers.Dense(1)])\nnet(X)\nnet.weights[0], net.weights[1]\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.Constant(1), bias_initializer=tf.zeros_initializer()),\n tf.keras.layers.Dense(1),\n])\nnet(X)\nnet.weights[0], net.weights[1]\n net = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.GlorotUniform()),\n tf.keras.layers.Dense(1, kernel_initializer=tf.keras.initializers.Constant(1)),\n])\nnet(X)\nclass MyInit(tf.keras.initializers.Initializer):\n def __call__(self, shape, dtype=None):\n data=tf.random.uniform(shape, -10, 10, dtype=dtype)\n factor=(tf.abs(data) >= 5)\n factor=tf.cast(factor, tf.float32)\n return data * factor\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=MyInit()),\n tf.keras.layers.Dense(1))\nnet(X)\nnet.layers[1].weights[0][:].assign(net.layers[1].weights[0] + 1)\nnet.layers[1].weights[0][0, 0].assign(42)\nnet.layers[1].weights[0]\nlayer = CenteredLayer()\nlayer(tf.constant([1, 2, 3, 4, 5]))\nnet = tf.keras.Sequential([tf.keras.layers.Dense(128), CenteredLayer()])"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = paddle.rand([2, 4])\nnet(X)\nnet.state_dict()['2.bias']\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_sublayer(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Normal(mean=0.0, std=0.01)\n paddle.zeros(m.bias)\nnet.apply(init_normal)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef init_constant(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(value = 1)\n paddle.zeros(m.bias)\nnet.apply(init_constant)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef xavier(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.XavierUniform(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(42)\nnet[0].apply(xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n for name, param in m.named_parameters()][0])\n paddle.nn.initializer.XavierUniform(m.weight, -10, 10)\n h = paddle.abs(m.weight) >= 5\n h = paddle.to_tensor(h)\n m = paddle.to_tensor(m.weight)\n m *= h\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.set_value(net[0].weight.numpy() + 1)\nval = net[0].weight.numpy()\nval[0, 0] = 42\nnet[0].weight.set_value(val)\nnet[0].weight[0]\nlayer = CenteredLayer()\nlayer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))\nnet = nn.Sequential(nn.Linear(8, 128), 
CenteredLayer())"}}},{"rowIdx":23,"cells":{"id":{"kind":"number","value":24,"string":"24"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nclass CenteredLayer(tf.keras.Model):\n def __init__(self):\n super().__init__()\n def call(self, inputs):\n return inputs - tf.reduce_mean(inputs)\nY = net(tf.random.uniform((4, 8)))\ntf.reduce_mean(Y)\nclass MyDense(tf.keras.Model):\n def __init__(self, units):\n super().__init__()\n self.units = units\n def build(self, X_shape):\n self.weight = self.add_weight(name='weight',\n shape=[X_shape[-1], self.units],\n initializer=tf.random_normal_initializer())\n self.bias = self.add_weight(\n name='bias', shape=[self.units],\n initializer=tf.zeros_initializer())\n def call(self, X):\n linear = tf.matmul(X, self.weight) + self.bias\n return tf.nn.relu(linear)\ndense = MyDense(3)\ndense(tf.random.uniform((2, 5)))\ndense.get_weights()\ndense(tf.random.uniform((2, 5)))\nnet = tf.keras.models.Sequential([MyDense(8), MyDense(1)])\nnet(tf.random.uniform((2, 64)))"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nclass CenteredLayer(nn.Layer):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(paddle.rand([4, 8]))\nY.mean()\nclass MyLinear(nn.Layer):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32')\n self.bias = paddle.create_parameter(shape=(units,), dtype='float32')\n def forward(self, X):\n linear = paddle.matmul(X, self.weight) + self.bias\n return F.relu(linear)\nlinear = MyLinear(5, 3)\nlinear.weight\nlinear(paddle.randn([2, 5]))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(paddle.rand([2, 64]))"}}},{"rowIdx":24,"cells":{"id":{"kind":"number","value":25,"string":"25"},"tensorflow":{"kind":"string","value":"import numpy as np\nimport tensorflow as tf\nx = tf.range(4)\nnp.save('x-file.npy', x)\nx2 = np.load('x-file.npy', allow_pickle=True)\ny = tf.zeros(4)\nnp.save('xy-files.npy', [x, y])\nx2, y2 = np.load('xy-files.npy', allow_pickle=True)\nmydict = {'x': x, 'y': y}\nnp.save('mydict.npy', mydict)\nmydict2 = np.load('mydict.npy', allow_pickle=True)\nclass MLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.flatten = tf.keras.layers.Flatten()\n self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)\n self.out = tf.keras.layers.Dense(units=10)\n def call(self, inputs):\n x = self.flatten(inputs)\n x = self.hidden(x)\n return self.out(x)\nnet = MLP()\nX = tf.random.uniform((2, 20))\nY = net(X)\nnet.save_weights('mlp.params')\nclone = MLP()\nclone.load_weights('mlp.params')"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nx = paddle.arange(4)\npaddle.save(x, 'x-file')\nx2 = paddle.load('x-file')\ny = paddle.zeros([4])\npaddle.save([x,y], 'x-file')\nx2, y2 = paddle.load('x-file')\nmydict = {'x': x, 'y': y}\npaddle.save(mydict, 'mydict')\nmydict2 = paddle.load('mydict')\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = 
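The custom-layer rows define CenteredLayer (no trainable parameters) and a dense layer with its own weight and bias. The parameter-free case reduces to subtracting the mean, shown here with NumPy (illustrative only):

import numpy as np

def centered_layer(X):
    return X - X.mean()  # the output mean is (numerically) zero

Y = centered_layer(np.array([1.0, 2.0, 3.0, 4.0, 5.0]))
print(Y, Y.mean())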
nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = paddle.randn(shape=[2, 20])\nY = net(X)\npaddle.save(net.state_dict(), 'mlp.pdparams')\nclone = MLP()\nclone.set_state_dict(paddle.load('mlp.pdparams'))\nclone.eval()"}}},{"rowIdx":25,"cells":{"id":{"kind":"number","value":26,"string":"26"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\ntf.device('/CPU:0'), tf.device('/GPU:0'), tf.device('/GPU:1')\nlen(tf.config.experimental.list_physical_devices('GPU'))\ndef try_gpu(i=0):\n if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:\n return tf.device(f'/GPU:{i}')\n return tf.device('/CPU:0')\ndef try_all_gpus():\n num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))\n devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)]\n return devices if devices else [tf.device('/CPU:0')]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = tf.constant([1, 2, 3])\nx.device\nwith try_gpu():\n X = tf.ones((2, 3))\nwith try_gpu(1):\n Y = tf.random.uniform((2, 3))\nwith try_gpu(1):\n Z = X\nwith try_gpu(1):\n Z2 = Z\nZ2 is Z\nstrategy = tf.distribute.MirroredStrategy()\nwith strategy.scope():\n net = tf.keras.models.Sequential([\n tf.keras.layers.Dense(1)])\nnet.layers[0].weights[0].device, net.layers[0].weights[1].device"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import paddle\nfrom paddle import nn\npaddle.device.set_device(\"cpu\"), paddle.CUDAPlace(0), paddle.CUDAPlace(1)\npaddle.device.cuda.device_count()\ndef try_gpu(i=0):\n if paddle.device.cuda.device_count() >= i + 1:\n return paddle.CUDAPlace(i)\n return paddle.CPUPlace()\ndef try_all_gpus():\n devices = [paddle.CUDAPlace(i) for i in range(paddle.device.cuda.device_count())]\n return devices if devices else paddle.CPUPlace()\ntry_gpu(),try_gpu(10),try_all_gpus()\nx = paddle.to_tensor([1, 2, 3])\nx.place\nX = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu())\nY = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1))\nZ = X.cuda(1)\nZ.cuda(1) is Z\nnet = nn.Sequential(nn.Linear(3, 1))\nnet=net.to(try_gpu())\nnet[0].weight.place"}}},{"rowIdx":26,"cells":{"id":{"kind":"number","value":27,"string":"27"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef corr2d(X, K):\n h, w = K.shape\n Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j].assign(tf.reduce_sum(\n X[i: i + h, j: j + w] * K))\n return Y\nX = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = tf.constant([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(tf.keras.layers.Layer):\n def __init__(self):\n super().__init__()\n def build(self, kernel_size):\n initializer = tf.random_normal_initializer()\n self.weight = self.add_weight(name='w', shape=kernel_size, initializer=initializer)\n self.bias = self.add_weight(name='b', shape=(1, ), initializer=initializer)\n def call(self, inputs):\n return corr2d(inputs, self.weight) + self.bias\nX = tf.Variable(tf.ones((6, 8)))\nX[:, 2:6].assign(tf.zeros(X[:, 2:6].shape))\nK = tf.constant([[1.0, -1.0]])\ncorr2d(tf.transpose(X), K)\nconv2d = tf.keras.layers.Conv2D(1, (1, 2), use_bias=False)\nX = tf.reshape(X, (1, 6, 8, 1))\nY = tf.reshape(Y, (1, 6, 7, 1))\nlr = 3e-2\nY_hat = conv2d(X)\nfor i in range(10):\n with tf.GradientTape(watch_accessed_variables=False) as g:\n g.watch(conv2d.weights[0])\n Y_hat = conv2d(X)\n l = (abs(Y_hat - Y)) ** 2\n update = 
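The GPU rows return a device handle when enough GPUs exist and otherwise fall back to the CPU. The fallback pattern itself is framework-agnostic (illustrative sketch; the device strings below are placeholders, not a real API):

def try_device(i, num_gpus):
    return f'gpu:{i}' if num_gpus >= i + 1 else 'cpu'

print(try_device(0, num_gpus=2), try_device(10, num_gpus=2))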
tf.multiply(lr, g.gradient(l, conv2d.weights[0]))\n weights = conv2d.get_weights()\n weights[0] = conv2d.weights[0] - update\n conv2d.set_weights(weights)\ntf.reshape(conv2d.get_weights()[0], (1, 2))"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef corr2d(X, K):\n h, w = K.shape\n Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Layer):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = paddle.ParamAttr(paddle.rand(kernel_size))\n self.bias = paddle.ParamAttr(paddle.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = paddle.ones((6, 8))\nX[:, 2:6] = 0\nK = paddle.to_tensor([[1.0, -1.0]])\ncorr2d(X.t(), K)\nconv2d = nn.Conv2D(1, 1, kernel_size=(1, 2))\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.clear_gradients()\n l.sum().backward()\n with paddle.no_grad():\n conv2d.weight[:] -= lr * conv2d.weight.grad\nconv2d.weight.reshape((1, 2))"}}},{"rowIdx":27,"cells":{"id":{"kind":"number","value":28,"string":"28"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\n\ndef comp_conv2d(conv2d, X):\n X = tf.reshape(X, (1, ) + X.shape + (1, ))\n Y = conv2d(X)\n return tf.reshape(Y, Y.shape[1:3])\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same')\nX = tf.random.uniform(shape=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=(5, 3), padding='same')\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same', strides=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=(3,5), padding='valid', strides=(3, 4))\ncomp_conv2d(conv2d, X).shape"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\n\ndef comp_conv2d(conv2d, X):\n X = paddle.reshape(X, [1, 1] + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1)\nX = paddle.rand((8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape"}}},{"rowIdx":28,"cells":{"id":{"kind":"number","value":29,"string":"29"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef corr2d_multi_in(X, K):\n return tf.reduce_sum([d2l.corr2d(x, k) for x, k in zip(X, K)], axis=0)\nX = tf.constant([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = tf.constant([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return tf.stack([corr2d_multi_in(X, k) 
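The convolution rows compute 2D cross-correlation by sliding the kernel over the input, and the padding/stride rows rely on the output-size rule floor((n - k + p + s) / s). Both can be checked with NumPy (illustrative only):

import numpy as np

def corr2d(X, K):
    h, w = K.shape
    Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
    return Y

print(corr2d(np.arange(9.0).reshape(3, 3), np.arange(4.0).reshape(2, 2)))
print((8 - 3 + 2 * 1 + 2) // 2)  # 8x8 input, 3x3 kernel, padding 1, stride 2 -> 4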
for k in K], 0)\nK = tf.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = tf.reshape(X, (c_i, h * w))\n K = tf.reshape(K, (c_o, c_i))\n Y = tf.matmul(K, X)\n return tf.reshape(Y, (c_o, h, w))\nX = tf.random.normal((3, 3, 3), 0, 1)\nK = tf.random.normal((2, 3, 1, 1), 0, 1)\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(tf.reduce_sum(tf.abs(Y1 - Y2))) < 1e-6"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return paddle.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = paddle.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = paddle.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = paddle.normal(0, 1, (3, 3, 3))\nK = paddle.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(paddle.abs(Y1 - Y2).sum()) < 1e-6"}}},{"rowIdx":29,"cells":{"id":{"kind":"number","value":30,"string":"30"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = tf.Variable(tf.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w +1)))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j].assign(tf.reduce_max(X[i: i + p_h, j: j + p_w]))\n elif mode =='avg':\n Y[i, j].assign(tf.reduce_mean(X[i: i + p_h, j: j + p_w]))\n return Y\nX = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1))\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3])\npool2d(X)\npaddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',\n strides=2)\npool2d(X_padded)\npaddings = tf.constant([[0, 0], [0, 0], [1, 1], [0, 0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[2, 3], padding='valid',\n strides=(2, 3))\npool2d(X_padded)\nX = tf.concat([X, X + 1], 3)\npaddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',\n strides=2)\npool2d(X_padded)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = paddle.arange(16, dtype=\"float32\").reshape((1, 1, 4, 
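The multi-channel rows show that a 1x1 convolution mixes channels exactly like a matrix multiplication applied at every pixel. A NumPy check of that equivalence in shape terms (illustrative only):

import numpy as np

c_i, c_o, h, w = 3, 2, 3, 3
X = np.random.normal(size=(c_i, h, w))
K = np.random.normal(size=(c_o, c_i))          # one scalar weight per (out, in) channel pair
Y = (K @ X.reshape(c_i, h * w)).reshape(c_o, h, w)
print(Y.shape)  # (2, 3, 3): channels mixed, spatial dimensions untouched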
4))\npool2d = nn.MaxPool2D(3, stride=3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3))\npool2d(X)\nX = paddle.concat((X, X + 1), 1)\npool2d = paddle.nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)"}}},{"rowIdx":30,"cells":{"id":{"kind":"number","value":31,"string":"31"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, activation='sigmoid', padding='same'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5, activation='sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120, activation='sigmoid'),\n tf.keras.layers.Dense(84, activation='sigmoid'),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((1, 28, 28, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\nclass TrainCallback(tf.keras.callbacks.Callback):\n def __init__(self, net, train_iter, test_iter, num_epochs, device_name):\n self.timer = d2l.Timer()\n self.animator = d2l.Animator( xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n self.net = net\n self.train_iter = train_iter\n self.test_iter = test_iter\n self.num_epochs = num_epochs\n self.device_name = device_name\n def on_epoch_begin(self, epoch, logs=None):\n self.timer.start()\n def on_epoch_end(self, epoch, logs):\n self.timer.stop()\n test_acc = self.net.evaluate(self.test_iter, verbose=0, return_dict=True)['accuracy']\n metrics = (logs['loss'], logs['accuracy'], test_acc)\n self.animator.add(epoch + 1, metrics)\n if epoch == self.num_epochs - 1:\n batch_size = next(iter(self.train_iter))[0].shape[0]\n num_examples = batch_size * tf.data.experimental.cardinality(self.train_iter).numpy()\ndef train_ch6(net_fn, train_iter, test_iter, num_epochs, lr, device):\n device_name = device._device_name\n strategy = tf.distribute.OneDeviceStrategy(device_name)\n with strategy.scope():\n optimizer = tf.keras.optimizers.SGD(learning_rate=lr)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n net = net_fn()\n net.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\n callback = TrainCallback(net, train_iter, test_iter, num_epochs, device_name)\n net.fit(train_iter, epochs=num_epochs, verbose=0, callbacks=[callback])\n return net"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn, optimizer\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = paddle.rand((1, 1, 28, 28), 'float32')\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2D:\n nn.initializer.XavierUniform(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n 
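The pooling rows reduce each window to its maximum or mean. The same reduction without a framework (NumPy, illustrative only):

import numpy as np

def pool2d(X, pool_size, mode='max'):
    p_h, p_w = pool_size
    Y = np.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            window = X[i:i + p_h, j:j + p_w]
            Y[i, j] = window.max() if mode == 'max' else window.mean()
    return Y

print(pool2d(np.arange(9.0).reshape(3, 3), (2, 2)))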
loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.clear_grad()\n X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with paddle.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))"}}},{"rowIdx":31,"cells":{"id":{"kind":"number","value":32,"string":"32"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=96, kernel_size=11, strides=4, activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Conv2D(filters=256, kernel_size=5, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape:\t', X.shape)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nnet = nn.Sequential(\n nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)"}}},{"rowIdx":32,"cells":{"id":{"kind":"number","value":33,"string":"33"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef vgg_block(num_convs, num_channels):\n blk = tf.keras.models.Sequential()\n for _ in range(num_convs):\n blk.add(tf.keras.layers.Conv2D(num_channels,kernel_size=3, padding='same',activation='relu'))\n blk.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))\n return blk\ndef vgg(conv_arch):\n net = 
tf.keras.models.Sequential()\n for (num_convs, num_channels) in conv_arch:\n net.add(vgg_block(num_convs, num_channels))\n net.add(tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10)]))\n return net\nnet = vgg(conv_arch)\nX = tf.random.uniform((1, 224, 224, 1))\nfor blk in net.layers:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t', X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = lambda: vgg(small_conv_arch)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2D(kernel_size=2, stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(*conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = vgg(small_conv_arch)"}}},{"rowIdx":33,"cells":{"id":{"kind":"number","value":34,"string":"34"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef nin_block(num_channels, kernel_size, strides, padding):\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(num_channels, kernel_size, strides=strides, padding=padding, activation='relu'),\n tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu'),\n tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu')])\ndef net():\n return tf.keras.models.Sequential([\n nin_block(96, kernel_size=11, strides=4, padding='valid'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n nin_block(256, kernel_size=5, strides=1, padding='same'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n nin_block(384, kernel_size=3, strides=1, padding='same'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Dropout(0.5),\n nin_block(10, kernel_size=3, strides=1, padding='same'),\n tf.keras.layers.GlobalAveragePooling2D(),\n tf.keras.layers.Reshape((1, 1, 10)),\n tf.keras.layers.Flatten(),\n ])\nX = tf.random.uniform((1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2D(out_channels, 
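The VGG rows assemble the network from (num_convs, out_channels) pairs and later shrink every stage by a fixed ratio. That bookkeeping is plain Python; the conv_arch value below is the usual d2l configuration and is assumed, since the rows never show it:

conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))
ratio = 4
small_conv_arch = [(n, c // ratio) for n, c in conv_arch]
print(small_conv_arch)  # same depth, one quarter of the channels per stage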
out_channels, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(3, stride=2), nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"}}},{"rowIdx":34,"cells":{"id":{"kind":"number","value":35,"string":"35"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass Inception(tf.keras.Model):\n def __init__(self, c1, c2, c3, c4):\n super().__init__()\n self.p1_1 = tf.keras.layers.Conv2D(c1, 1, activation='relu')\n self.p2_1 = tf.keras.layers.Conv2D(c2[0], 1, activation='relu')\n self.p2_2 = tf.keras.layers.Conv2D(c2[1], 3, padding='same', activation='relu')\n self.p3_1 = tf.keras.layers.Conv2D(c3[0], 1, activation='relu')\n self.p3_2 = tf.keras.layers.Conv2D(c3[1], 5, padding='same', activation='relu')\n self.p4_1 = tf.keras.layers.MaxPool2D(3, 1, padding='same')\n self.p4_2 = tf.keras.layers.Conv2D(c4, 1, activation='relu')\n def call(self, x):\n p1 = self.p1_1(x)\n p2 = self.p2_2(self.p2_1(x))\n p3 = self.p3_2(self.p3_1(x))\n p4 = self.p4_2(self.p4_1(x))\n return tf.keras.layers.Concatenate()([p1, p2, p3, p4])\ndef b1():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, 7, strides=2, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b2():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, 1, activation='relu'),\n tf.keras.layers.Conv2D(192, 3, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b3():\n return tf.keras.models.Sequential([\n Inception(64, (96, 128), (16, 32), 32),\n Inception(128, (128, 192), (32, 96), 64),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b4():\n return tf.keras.Sequential([\n Inception(192, (96, 208), (16, 48), 64),\n Inception(160, (112, 224), (24, 64), 64),\n Inception(128, (128, 256), (24, 64), 64),\n Inception(112, (144, 288), (32, 64), 64),\n Inception(256, (160, 320), (32, 128), 128),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b5():\n return tf.keras.Sequential([\n Inception(256, (160, 320), (32, 128), 128),\n Inception(384, (192, 384), (48, 128), 128),\n tf.keras.layers.GlobalAvgPool2D(),\n tf.keras.layers.Flatten()\n ])\ndef net():\n return tf.keras.Sequential([b1(), b2(), b3(), b4(), b5(),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform(shape=(1, 96, 96, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nclass Inception(nn.Layer):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2D(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2D(c2[0], c2[1], 
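The NiN rows end with global (adaptive 1x1) average pooling so the ten channel maps become the ten class scores without any dense layer. The reduction is just a mean over the spatial axes (NumPy sketch, illustrative only):

import numpy as np

feature_maps = np.random.normal(size=(1, 10, 5, 5))  # (batch, classes, h, w)
logits = feature_maps.mean(axis=(2, 3))              # global average pooling
print(logits.shape)  # (1, 10)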
kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return paddle.concat(x=[p1, p2, p3, p4], axis=1)\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2,padding=1))\nb2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = paddle.rand(shape=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"}}},{"rowIdx":35,"cells":{"id":{"kind":"number","value":36,"string":"36"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps):\n inv = tf.cast(tf.math.rsqrt(moving_var + eps), X.dtype)\n inv *= gamma\n Y = X * inv + (beta - moving_mean * inv)\n return Y\nclass BatchNorm(tf.keras.layers.Layer):\n def __init__(self, **kwargs):\n super(BatchNorm, self).__init__(**kwargs)\n def build(self, input_shape):\n weight_shape = [input_shape[-1], ]\n self.gamma = self.add_weight(name='gamma', shape=weight_shape, initializer=tf.initializers.ones, trainable=True)\n self.beta = self.add_weight(name='beta', shape=weight_shape, initializer=tf.initializers.zeros, trainable=True)\n self.moving_mean = self.add_weight(name='moving_mean', shape=weight_shape, initializer=tf.initializers.zeros, trainable=False)\n self.moving_variance = self.add_weight(name='moving_variance', shape=weight_shape, initializer=tf.initializers.ones, trainable=False)\n super(BatchNorm, self).build(input_shape)\n def assign_moving_average(self, variable, value):\n momentum = 0.9\n delta = variable * momentum + value * (1 - momentum)\n return variable.assign(delta)\n @tf.function\n def call(self, inputs, training):\n if training:\n axes = list(range(len(inputs.shape) - 1))\n batch_mean = tf.reduce_mean(inputs, axes, keepdims=True)\n batch_variance = tf.reduce_mean(tf.math.squared_difference(inputs, tf.stop_gradient(batch_mean)), axes, keepdims=True)\n batch_mean = tf.squeeze(batch_mean, axes)\n batch_variance = tf.squeeze(batch_variance, axes)\n mean_update = self.assign_moving_average(self.moving_mean, batch_mean)\n variance_update = self.assign_moving_average(self.moving_variance, batch_variance)\n self.add_update(mean_update)\n self.add_update(variance_update)\n mean, variance = batch_mean, 
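In the GoogLeNet rows every Inception block concatenates its four branches along the channel axis, so the next block's in_channels is c1 + c2[1] + c3[1] + c4. A quick check of the first transition (illustrative only):

c1, c2, c3, c4 = 64, (96, 128), (16, 32), 32
print(c1 + c2[1] + c3[1] + c4)  # 256, matching the Inception(256, ...) that follows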
batch_variance\n else:\n mean, variance = self.moving_mean, self.moving_variance\n output = batch_norm(inputs, moving_mean=mean, moving_var=variance, beta=self.beta, gamma=self.gamma, eps=1e-5)\n return output\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(84),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(10)]\n )\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\ntf.reshape(net.layers[1].gamma, (-1,)), tf.reshape(net.layers[1].beta, (-1,))\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(84),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(10),\n ])"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True):\n if not is_training:\n X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = paddle.mean(X)\n var = paddle.mean(((X - mean) ** 2))\n else:\n mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)\n var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / (var + eps) ** 0.5\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Layer):\n def __init__(self, num_features, num_dims=4):\n super(BatchNorm, self).__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32')))\n self.beta = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32')))\n self.moving_mean = paddle.zeros(shape=shape, dtype='float32')\n self.moving_var = paddle.zeros(shape=shape, dtype='float32')\n def forward(self, X):\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n 
self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training)\n return Y\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\nparam = net.parameters()\nprint('gamma:', param[2].numpy().reshape(-1))\nprint('beta:', param[3].numpy().reshape(-1))\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(),\n nn.Linear(84, 10))"}}},{"rowIdx":36,"cells":{"id":{"kind":"number","value":37,"string":"37"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass Residual(tf.keras.Model):\n def __init__(self, num_channels, use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = tf.keras.layers.Conv2D(\n num_channels, padding='same', kernel_size=3, strides=strides)\n self.conv2 = tf.keras.layers.Conv2D(\n num_channels, kernel_size=3, padding='same')\n self.conv3 = None\n if use_1x1conv:\n self.conv3 = tf.keras.layers.Conv2D(\n num_channels, kernel_size=1, strides=strides)\n self.bn1 = tf.keras.layers.BatchNormalization()\n self.bn2 = tf.keras.layers.BatchNormalization()\n def call(self, X):\n Y = tf.keras.activations.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3 is not None:\n X = self.conv3(X)\n Y += X\n return tf.keras.activations.relu(Y)\nblk = Residual(3)\nX = tf.random.uniform((4, 6, 6, 3))\nY = blk(X)\nY.shape\nblk = Residual(6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\nclass ResnetBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels, num_residuals, first_block=False, **kwargs):\n super(ResnetBlock, self).__init__(**kwargs)\n self.residual_layers = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n self.residual_layers.append(Residual(num_channels, use_1x1conv=True, strides=2))\n else:\n self.residual_layers.append(Residual(num_channels))\n def call(self, X):\n for layer in self.residual_layers.layers:\n X = layer(X)\n return X\nb2 = ResnetBlock(64, 2, first_block=True)\nb3 = ResnetBlock(128, 2)\nb4 = ResnetBlock(256, 2)\nb5 = ResnetBlock(512, 2)\ndef net():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'),\n ResnetBlock(64, 2, first_block=True),\n ResnetBlock(128, 2),\n ResnetBlock(256, 2),\n ResnetBlock(512, 2),\n 
tf.keras.layers.GlobalAvgPool2D(),\n tf.keras.layers.Dense(units=10)])\nX = tf.random.uniform(shape=(1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nfrom paddle.nn import functional as F\nclass Residual(nn.Layer):\n def __init__(self, input_channels, num_channels, use_1x1conv=False,\n strides=1):\n super(Residual, self).__init__()\n self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2D(num_channels)\n self.bn2 = nn.BatchNorm2D(num_channels)\n self.relu = nn.ReLU()\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3, 3)\nX = paddle.rand([4, 3, 6, 6])\nY = blk(X)\nY.shape\nblk = Residual(3, 6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\ndef resnet_block(input_channels, num_channels, num_residuals, first_block=False):\n blk = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))\n else:\n blk.append(Residual(num_channels, num_channels))\n return blk\nb2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))\nb3 = nn.Sequential(*resnet_block(64, 128, 2))\nb4 = nn.Sequential(*resnet_block(128, 256, 2))\nb5 = nn.Sequential(*resnet_block(256, 512, 2))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"}}},{"rowIdx":37,"cells":{"id":{"kind":"number","value":38,"string":"38"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass ConvBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels):\n super(ConvBlock, self).__init__()\n self.bn = tf.keras.layers.BatchNormalization()\n self.relu = tf.keras.layers.ReLU()\n self.conv = tf.keras.layers.Conv2D(filters=num_channels, kernel_size=(3, 3), padding='same')\n self.listLayers = [self.bn, self.relu, self.conv]\n def call(self, x):\n y = x\n for layer in self.listLayers.layers:\n y = layer(y)\n y = tf.keras.layers.concatenate([x,y], axis=-1)\n return y\nclass DenseBlock(tf.keras.layers.Layer):\n def __init__(self, num_convs, num_channels):\n super(DenseBlock, self).__init__()\n self.listLayers = []\n for _ in range(num_convs):\n self.listLayers.append(ConvBlock(num_channels))\n def call(self, x):\n for layer in self.listLayers.layers:\n x = layer(x)\n return x\nblk = DenseBlock(2, 10)\nX = tf.random.uniform((4, 8, 8, 3))\nY = blk(X)\nY.shape\nclass TransitionBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels, **kwargs):\n super(TransitionBlock, self).__init__(**kwargs)\n self.batch_norm = tf.keras.layers.BatchNormalization()\n self.relu = tf.keras.layers.ReLU()\n 
self.conv = tf.keras.layers.Conv2D(num_channels, kernel_size=1)\n self.avg_pool = tf.keras.layers.AvgPool2D(pool_size=2, strides=2)\n def call(self, x):\n x = self.batch_norm(x)\n x = self.relu(x)\n x = self.conv(x)\n return self.avg_pool(x)\nblk = TransitionBlock(10)\nblk(Y).shape\ndef block_1():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.ReLU(),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef block_2():\n net = block_1()\n num_channels, growth_rate = 64, 32\n num_convs_in_dense_blocks = [4, 4, 4, 4]\n for i, num_convs in enumerate(num_convs_in_dense_blocks):\n net.add(DenseBlock(num_convs, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n num_channels //= 2\n net.add(TransitionBlock(num_channels))\n return net\ndef net():\n net = block_2()\n net.add(tf.keras.layers.BatchNormalization())\n net.add(tf.keras.layers.ReLU())\n net.add(tf.keras.layers.GlobalAvgPool2D())\n net.add(tf.keras.layers.Flatten())\n net.add(tf.keras.layers.Dense(10))\n return net"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Layer):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = paddle.concat(x=[X, Y], axis=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = paddle.randn([4, 3, 8, 8])\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2D(kernel_size=2, stride=2))\nblk = transition_block(23, 10)\nblk(Y).shape\nb1 = nn.Sequential(\n nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nblks = []\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n blks.append(DenseBlock(num_convs, num_channels, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n blks.append(transition_block(num_channels, num_channels // 2))\n num_channels = num_channels // 2\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2D(num_channels), nn.ReLU(),\n nn.AdaptiveMaxPool2D((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))"}}},{"rowIdx":38,"cells":{"id":{"kind":"number","value":39,"string":"39"},"tensorflow":{"kind":"string","value":"%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nT = 1000\ntime = tf.range(1, T + 1, dtype=tf.float32)\nx = tf.sin(0.01 * time) + tf.random.normal([T], 0, 0.2)\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = tf.Variable(tf.zeros((T - tau, tau)))\nfor i in range(tau):\n features[:, i].assign(x[i: T - tau + i])\nlabels = tf.reshape(x[tau:], (-1, 
1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef get_net():\n net = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='relu'),\n tf.keras.layers.Dense(1)])\n return net\nloss = tf.keras.losses.MeanSquaredError()\ndef train(net, train_iter, loss, epochs, lr):\n trainer = tf.keras.optimizers.Adam()\n for epoch in range(epochs):\n for X, y in train_iter:\n with tf.GradientTape() as g:\n out = net(X)\n l = loss(y, out)\n params = net.trainable_variables\n grads = g.gradient(l, params)\n trainer.apply_gradients(zip(grads, params))\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.numpy(), onestep_preds.numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = tf.Variable(tf.zeros(T))\nmultistep_preds[:n_train + tau].assign(x[:n_train + tau])\nfor i in range(n_train + tau, T):\n multistep_preds[i].assign(tf.reshape(net(tf.reshape(multistep_preds[i - tau: i], (1, -1))), ()))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.numpy(), onestep_preds.numpy(),\n multistep_preds[n_train + tau:].numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = tf.Variable(tf.zeros((T - tau - max_steps + 1, tau + max_steps)))\nfor i in range(tau):\n features[:, i].assign(x[i: i + T - tau - max_steps + 1].numpy())\nfor i in range(tau, tau + max_steps):\n features[:, i].assign(tf.reshape(net((features[:, i - tau: i])), -1))\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nT = 1000\ntime = paddle.arange(1, T + 1, dtype=paddle.float32)\nx = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = paddle.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.XavierUniform(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters())\n for epoch in range(epochs):\n for i,(X, y) in enumerate (train_iter()):\n trainer.clear_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = paddle.zeros([T])\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, 
-1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = paddle.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape([-1])\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"}}},{"rowIdx":39,"cells":{"id":{"kind":"number","value":40,"string":"40"},"tensorflow":{"kind":"string","value":"import collections\nimport re\nfrom d2l import tensorflow as d2l\ndef tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('Error: Unknown word element type:' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import collections\nimport re\nfrom d2l import paddle as d2l\ndef tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('Error: Unknown word element type:' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)"}}},{"rowIdx":40,"cells":{"id":{"kind":"number","value":41,"string":"41"},"tensorflow":{"kind":"string","value":"import random\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield tf.constant(X), tf.constant(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = tf.constant(corpus[offset: offset + 
num_tokens])\n Ys = tf.constant(corpus[offset + 1: offset + 1 + num_tokens])\n Xs = tf.reshape(Xs, (batch_size, -1))\n Ys = tf.reshape(Ys, (batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_batches * num_steps, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield paddle.to_tensor(X), paddle.to_tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = paddle.to_tensor(corpus[offset: offset + num_tokens])\n Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"}}},{"rowIdx":41,"cells":{"id":{"kind":"number","value":42,"string":"42"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\nX, W_xh = tf.random.normal((3, 1), 0, 1), tf.random.normal((1, 4), 0, 1)\nH, W_hh = tf.random.normal((3, 4), 0, 1), tf.random.normal((4, 4), 0, 1)\ntf.matmul(X, W_xh) + tf.matmul(H, W_hh)\ntf.matmul(tf.concat((X, H), 1), tf.concat((W_xh, W_hh), 0))"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nX, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4))\nH, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4))\npaddle.matmul(X, W_xh) + paddle.matmul(H, W_hh)\npaddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))"}}},{"rowIdx":42,"cells":{"id":{"kind":"number","value":43,"string":"43"},"tensorflow":{"kind":"string","value":"%matplotlib inline\nimport math\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ntrain_random_iter, vocab_random_iter = d2l.load_data_time_machine(batch_size, num_steps, use_random_iter=True)\ntf.one_hot(tf.constant([0, 2]), len(vocab))\nX = tf.reshape(tf.range(10), (2, 5))\ntf.one_hot(tf.transpose(X), 28).shape\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)\n W_xh = tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32)\n W_hh = 
tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32)\n b_h = tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32)\n W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n return params\ndef init_rnn_state(batch_size, num_hiddens):\n return (tf.zeros((batch_size, num_hiddens)), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n X = tf.reshape(X,[-1,W_xh.shape[0]])\n H = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(H, W_hh) + b_h)\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, init_state, forward_fn, get_params):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.init_state, self.forward_fn = init_state, forward_fn\n self.trainable_variables = get_params(vocab_size, num_hiddens)\n def __call__(self, X, state):\n X = tf.one_hot(tf.transpose(X), self.vocab_size)\n X = tf.cast(X, tf.float32)\n return self.forward_fn(X, state, self.trainable_variables)\n def begin_state(self, batch_size, *args, **kwargs):\n return self.init_state(batch_size, self.num_hiddens)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nnum_hiddens = 512\nwith strategy.scope():\n net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)\nstate = net.begin_state(X.shape[0])\nY, new_state = net(X, state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab):\n state = net.begin_state(batch_size=1, dtype=tf.float32)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: tf.reshape(tf.constant([outputs[-1]]),\n (1, 1)).numpy()\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.numpy().argmax(axis=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\npredict_ch8('time traveller ', 10, net, vocab)\ndef grad_clipping(grads, theta):\n theta = tf.constant(theta, dtype=tf.float32)\n new_grad = []\n for grad in grads:\n if isinstance(grad, tf.IndexedSlices):\n new_grad.append(tf.convert_to_tensor(grad))\n else:\n new_grad.append(grad)\n norm = tf.math.sqrt(sum((tf.reduce_sum(grad ** 2)).numpy()\n for grad in new_grad))\n norm = tf.cast(norm, tf.float32)\n if tf.greater(norm, theta):\n for i, grad in enumerate(new_grad):\n new_grad[i] = grad * theta / norm\n else:\n new_grad = new_grad\n return new_grad\ndef train_epoch_ch8(net, train_iter, loss, updater, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], dtype=tf.float32)\n with tf.GradientTape(persistent=True) as g:\n y_hat, state = net(X, state)\n y = tf.reshape(tf.transpose(Y), (-1))\n l = loss(y, y_hat)\n params = net.trainable_variables\n grads = g.gradient(l, params)\n grads = grad_clipping(grads, 1)\n updater.apply_gradients(zip(grads, params))\n metric.add(l * d2l.size(y), d2l.size(y))\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, strategy, use_random_iter=False):\n with strategy.scope():\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n updater = 
tf.keras.optimizers.SGD(lr)\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\n device = d2l.try_gpu()._device_name\nnum_epochs, lr = 500, 1\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, strategy)\nwith strategy.scope():\n net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)\ntrain_ch8(net, train_iter, vocab_random_iter, lr, num_epochs, strategy, use_random_iter=True)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nF.one_hot(paddle.to_tensor([0, 2]), len(vocab))\nX = paddle.arange(10).reshape((2, 5))\nF.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)* 0.01\n W_xh = normal([num_inputs, num_hiddens])\n W_hh = normal([num_hiddens, num_hiddens])\n b_h = paddle.zeros(shape=[num_hiddens])\n W_hq = normal([num_hiddens, num_outputs])\n b_q = paddle.zeros(shape=[num_outputs])\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient=False\n return params\ndef init_rnn_state(batch_size, num_hiddens):\n return (paddle.zeros(shape=[batch_size, num_hiddens]), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = paddle.tanh(paddle.mm(X, W_xh) + paddle.mm(H, W_hh) + b_h)\n Y = paddle.mm(H, W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(x=outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = F.one_hot(X.T, self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size):\n return self.init_state(batch_size, self.num_hiddens)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0])\nY, new_state = net(X, state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: paddle.to_tensor(outputs[-1], place=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(paddle.reshape(paddle.argmax(y,axis=1),shape=[1])))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\npredict_ch8('time traveller ', 10, net, vocab, d2l.try_gpu())\ndef grad_clipping(net, theta):\n if isinstance(net, nn.Layer):\n params = [p for p in net.parameters() if not p.stop_gradient]\n else:\n params = net.params\n norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params))\n if norm > theta:\n 
with paddle.no_grad():\n for param in params:\n param.grad.set_value(param.grad * theta / norm)\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0])\n else:\n if isinstance(net, nn.Layer) and not isinstance(state, tuple):\n state.stop_gradient=True\n else:\n for s in state:\n s.stop_gradient=True\n y = paddle.reshape(Y.T,shape=[-1])\n X = paddle.to_tensor(X, place=device)\n y = paddle.to_tensor(y, place=device)\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.backward()\n grad_clipping(net, 1)\n updater.step()\n else:\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * y.numel(), y.numel())\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, nn.Layer):\n updater = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\nnum_epochs, lr = 500, 1\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu())\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)"}}},{"rowIdx":43,"cells":{"id":{"kind":"number","value":44,"string":"44"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_cell = tf.keras.layers.SimpleRNNCell(num_hiddens, kernel_initializer='glorot_uniform')\nrnn_layer = tf.keras.layers.RNN(rnn_cell, time_major=True, return_sequences=True, return_state=True)\nstate = rnn_cell.get_initial_state(batch_size=batch_size, dtype=tf.float32)\nstate.shape\nX = tf.random.uniform((num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, len(state_new), state_new[0].shape\nclass RNNModel(tf.keras.layers.Layer):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.dense = tf.keras.layers.Dense(vocab_size)\n def call(self, inputs, state):\n X = tf.one_hot(tf.transpose(inputs), self.vocab_size)\n Y, *state = self.rnn(X, state)\n output = self.dense(tf.reshape(Y, (-1, Y.shape[-1])))\n return output, state\n def begin_state(self, *args, **kwargs):\n return self.rnn.cell.get_initial_state(*args, **kwargs)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n net = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, 
strategy)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True)\nstate = paddle.zeros(shape=[1, batch_size, num_hiddens])\nstate.shape\nX = paddle.rand(shape=[num_steps, batch_size, len(vocab)])\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if self.rnn.num_directions==1:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens])\n else:\n return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]),\n paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1.0\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)"}}},{"rowIdx":44,"cells":{"id":{"kind":"number","value":45,"string":"45"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)\n def three():\n return (tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32), tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (tf.zeros((batch_size, num_hiddens)), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n X = tf.reshape(X,[-1,W_xh.shape[0]])\n Z = tf.sigmoid(tf.matmul(X, W_xz) + tf.matmul(H, W_hz) + b_z)\n R = tf.sigmoid(tf.matmul(X, W_xr) + tf.matmul(H, W_hr) + b_r)\n H_tilda = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,)\nvocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name\nstrategy = 
tf.distribute.OneDeviceStrategy(device_name)\nnum_epochs, lr = 500, 1\nwith strategy.scope():\n model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_gru_state, gru, get_params)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)\ngru_cell = tf.keras.layers.GRUCell(num_hiddens, kernel_initializer='glorot_uniform')\ngru_layer = tf.keras.layers.RNN(gru_cell, time_major=True, return_sequences=True, return_state=True)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModel(gru_layer, vocab_size=len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H,*_ = state\n outputs = []\n for X in inputs:\n Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H,*_)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"}}},{"rowIdx":45,"cells":{"id":{"kind":"number","value":46,"string":"46"},"tensorflow":{"kind":"string","value":"import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.Variable(tf.random.normal(shape=shape, stddev=0.01, mean=0, dtype=tf.float32))\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n return params\ndef 
init_lstm_state(batch_size, num_hiddens):\n return (tf.zeros(shape=(batch_size, num_hiddens)), tf.zeros(shape=(batch_size, num_hiddens)))\ndef lstm(inputs, state, params):\n W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q = params\n (H, C) = state\n outputs = []\n for X in inputs:\n X=tf.reshape(X,[-1,W_xi.shape[0]])\n I = tf.sigmoid(tf.matmul(X, W_xi) + tf.matmul(H, W_hi) + b_i)\n F = tf.sigmoid(tf.matmul(X, W_xf) + tf.matmul(H, W_hf) + b_f)\n O = tf.sigmoid(tf.matmul(X, W_xo) + tf.matmul(H, W_ho) + b_o)\n C_tilda = tf.tanh(tf.matmul(X, W_xc) + tf.matmul(H, W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * tf.tanh(C)\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,C)\nvocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name\nnum_epochs, lr = 500, 1\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_lstm_state, lstm, get_lstm_params)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)\nlstm_cell = tf.keras.layers.LSTMCell(num_hiddens, kernel_initializer='glorot_uniform')\nlstm_layer = tf.keras.layers.RNN(lstm_cell, time_major=True, return_sequences=True, return_state=True)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModel(lstm_layer, vocab_size=len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as Function\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), paddle.zeros([batch_size, num_hiddens]))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * paddle.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True)\nmodel = 
d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"}}},{"rowIdx":46,"cells":{"id":{"kind":"number","value":47,"string":"47"},"tensorflow":{"kind":"string","value":"import os\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = tf.constant([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = tf.reduce_sum(\n tf.cast(array != vocab['<pad>'], tf.int32), 1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', tf.cast(X, tf.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', tf.cast(Y, tf.int32))\n print('Valid length of Y:', Y_valid_len)\n break"},"pytorch":{"kind":"null"},"mxnet":{"kind":"null"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport os\nimport paddle\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = (array != vocab['<pad>']).astype(paddle.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(paddle.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(paddle.int32))\n print('Valid length of Y:', Y_valid_len)\n break"}}},{"rowIdx":47,"cells":{"id":{"kind":"number","value":48,"string":"48"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"x = np.arange(12)\nx.size\nX = x.reshape(3, 4)\nnp.zeros((2, 3, 4))\nnp.ones((2, 3, 4))\nnp.random.normal(0, 1, size=(3, 4))\nnp.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = np.array([1, 2, 4, 8])\ny = np.array([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\nnp.exp(x)\nX = np.arange(12).reshape(3, 4)\nY = np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nnp.concatenate([X, Y], axis=0), np.concatenate([X, Y], axis=1)\na = np.arange(3).reshape(3, 1)\nb = np.arange(2).reshape(1, 2)\nZ = np.zeros_like(Y)\nZ[:] = X + Y\nA = X.asnumpy()\nB = np.array(A)\na = np.array([3.5])\nprint(a, a.item(), float(a), int(a))"},"paddle":{"kind":"string","value":"x = paddle.arange(12)\nx.numel()\nX = paddle.reshape(x, (3, 4))\npaddle.zeros((2, 3, 4))\npaddle.ones((2, 3, 4))\npaddle.randn((3, 4),'float32')\npaddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = paddle.to_tensor([1.0, 2, 4, 8])\ny = paddle.to_tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x**y\npaddle.exp(x)\nX = paddle.arange(12, dtype='float32').reshape((3, 4))\nY = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\npaddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)\na = paddle.reshape(paddle.arange(3), (3, 1))\nb = paddle.reshape(paddle.arange(2), (1, 2))\nZ = paddle.zeros_like(Y)\nZ = X + Y\nA = X.numpy()\nB = paddle.to_tensor(A)\ntype(A), type(B)\na = paddle.to_tensor([3.5])\na, a.item(), float(a), int(a)"}}},{"rowIdx":48,"cells":{"id":{"kind":"number","value":49,"string":"49"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np\nX, y = np.array(inputs.values), np.array(outputs.values)"},"paddle":{"kind":"string","value":"import 
warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nX, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)"}}},{"rowIdx":49,"cells":{"id":{"kind":"number","value":50,"string":"50"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nnpx.set_np()\nx = np.array(3.0)\ny = np.array(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = np.arange(4)\nA = np.arange(20).reshape(5, 4)\nA.T\nB = np.array([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == B.T\nX = np.arange(24).reshape(2, 3, 4)\nA = np.arange(20).reshape(5, 4)\nB = A.copy()\nprint(A, A + B)\na = 2\nX = np.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = np.arange(4)\nprint(x, x.sum())\na = A.sum()\nA.mean()\nA.sum() / A.size\nA.mean(axis=0)\nA.sum(axis=0) / A.shape[0]\nsum_A = A.sum(axis=1, keepdims=True)\ny = np.ones(4)\nprint(np.dot(x, y))\nnp.sum(x * y)\nA.shape, x.shape, np.dot(A, x)\nB = np.ones(shape=(4, 3))\nnp.dot(A, B)\nu = np.array([3, -4])\nnp.linalg.norm(u)\nnp.abs(u).sum()\nnp.linalg.norm(np.ones((4, 9)))"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.to_tensor([3.0])\ny = paddle.to_tensor([2.0])\nx + y, x * y, x / y, x**y\nx = paddle.arange(4)\nA = paddle.reshape(paddle.arange(20), (5, 4))\npaddle.transpose(A, perm=[1, 0])\nB = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == paddle.transpose(B, perm=[1, 0])\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\nA = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))\nB = A.clone()\nA, A + B\na = 2\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\na + X, (a * X).shape\nx = paddle.arange(4, dtype=paddle.float32)\nprint(x, x.sum())\nA.shape, A.sum()\nA.mean(), A.sum() / A.numel()\nA.mean(axis=0), A.sum(axis=0) / A.shape[0]\nsum_A = paddle.sum(A, axis=1, keepdim=True)\ny = paddle.ones(shape=[4], dtype='float32')\nx, y, paddle.dot(x, y)\npaddle.sum(x * y)\nA.shape, x.shape, paddle.mv(A, x)\nB = paddle.ones(shape=[4, 3], dtype='float32')\npaddle.mm(A, B)\nu = paddle.to_tensor([3.0, -4.0])\npaddle.norm(u)\npaddle.abs(u).sum()\npaddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))"}}},{"rowIdx":50,"cells":{"id":{"kind":"number","value":51,"string":"51"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"%matplotlib inline\nfrom matplotlib_inline import backend_inline\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import paddle as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1"}}},{"rowIdx":51,"cells":{"id":{"kind":"number","value":52,"string":"52"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import autograd, np, npx\nnpx.set_np()\nx = np.arange(4.0)\nx.attach_grad()\nx.grad\nwith autograd.record():\n y = 2 * np.dot(x, x)\nwith 
autograd.record():\n y = x.sum()\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\n u = y.detach()\n z = u * x\nz.backward()\nx.grad == u\ny.backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while np.linalg.norm(b) < 1000:\n b = b * 2\n if b.sum() > 0:\n c = b\n else:\n c = 100 * b\n return c\na = np.random.normal()\na.attach_grad()\nwith autograd.record():\n d = f(a)\nd.backward()"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.arange(4, dtype='float32')\nx = paddle.to_tensor(x, stop_gradient=False)\ny = 2 * paddle.dot(x, x)\nx.clear_gradient()\ny = paddle.sum(x)\ny.backward()\nx.grad\nx.clear_gradient()\ny = x * x\npaddle.sum(y).backward()\nx.grad\nx.clear_gradient()\ny = x * x\nu = y.detach()\nz = u * x\npaddle.sum(z).backward()\nx.grad == u\nx.clear_gradient()\npaddle.sum(y).backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while paddle.norm(b) < 1000:\n b = b * 2\n if paddle.sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)\nd = f(a)\nd.backward()"}}},{"rowIdx":52,"cells":{"id":{"kind":"number","value":53,"string":"53"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"%matplotlib inline\nimport random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfair_probs = [1.0 / 6] * 6\nnp.random.multinomial(1, fair_probs)\nnp.random.multinomial(10, fair_probs)\ncounts = np.random.multinomial(1000, fair_probs).astype(np.float32)"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport numpy as np\nimport paddle\nfair_probs = [1.0 / 6] * 6\npaddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample()\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000"}}},{"rowIdx":53,"cells":{"id":{"kind":"number","value":54,"string":"54"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"counts = np.random.multinomial(10, fair_probs, size=500)\ncum_counts = counts.astype(np.float32).cumsum(axis=0)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].asnumpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nfrom mxnet import np\na = dir(np.random)\nhelp(np.ones)\nnp.ones(4)"},"paddle":{"kind":"string","value":"counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1))\ncum_counts = counts.cumsum(axis=0)\ncum_counts = cum_counts.squeeze(axis=1)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdim=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i],\n label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend()\nimport 
warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nhelp(paddle.ones)\npaddle.ones([4], dtype='float32')"}}},{"rowIdx":54,"cells":{"id":{"kind":"number","value":55,"string":"55"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"%matplotlib inline\nimport math\nimport time\nfrom mxnet import np\nfrom d2l import mxnet as d2l\nn = 10000\na = np.ones(n)\nb = np.ones(n)\nc = np.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x.asnumpy(), [normal(x, mu, sigma).asnumpy() for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport time\nimport numpy as np\nimport paddle\nn = 10000\na = paddle.ones([n])\nb = paddle.ones([n])\nc = paddle.zeros([n])\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',\n ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"}}},{"rowIdx":55,"cells":{"id":{"kind":"number","value":56,"string":"56"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"%matplotlib inline\nimport random\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef synthetic_data(w, b, num_examples):\n X = np.random.normal(0, 1, (num_examples, len(w)))\n y = np.dot(X, w) + b\n y += np.random.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].asnumpy(), labels.asnumpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = np.array(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n print(X, '\n', y)\n break\nw = np.random.normal(0, 0.01, (2, 1))\nb = np.zeros(1)\nw.attach_grad()\nb.attach_grad()\ndef linreg(X, w, b):\n return np.dot(X, w) + b\ndef sgd(params, lr, batch_size):\n for param in params:\n param[:] = param - lr * param.grad / batch_size\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n with autograd.record():\n l = loss(net(X, w, b), y)\n l.backward()\n sgd([w, b], lr, batch_size)\n train_l = loss(net(features, w, b), labels)"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ndef synthetic_data(w, b, num_examples):\n X = paddle.normal(0, 1, (num_examples, len(w)))\n y = paddle.matmul(X, w) + b\n y += paddle.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, 1].detach().numpy(), 
labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n break\nw = paddle.normal(0, 0.01, shape=(2,1))\nb = paddle.zeros(shape=[1])\nw.stop_gradient = False\nb.stop_gradient = False\ndef linreg(X, w, b):\n return paddle.matmul(X, w) + b\ndef sgd(params, lr, batch_size):\n with paddle.no_grad():\n for i, param in enumerate(params):\n param -= lr * params[i].grad / batch_size\n params[i].set_value(param)\n params[i].clear_gradient()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with paddle.no_grad():\n train_l = loss(net(features, w, b), labels)"}}},{"rowIdx":56,"cells":{"id":{"kind":"number","value":57,"string":"57"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = gluon.data.ArrayDataset(*data_arrays)\n return gluon.data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom mxnet.gluon import nn\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nfrom mxnet import init\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.L2Loss()\nfrom mxnet import gluon\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})\nw = net[0].weight.data()\nb = net[0].bias.data()"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport paddle\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = paddle.io.TensorDataset(data_arrays)\n return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01))\nbias_attr = paddle.ParamAttr(initializer=None)\nnet = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr))\nloss = nn.MSELoss()\ntrainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters())\nw = net[0].weight\nb = net[0].bias"}}},{"rowIdx":57,"cells":{"id":{"kind":"number","value":58,"string":"58"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"%matplotlib inline\nimport sys\nfrom mxnet import gluon\nfrom d2l import mxnet as d2l\nd2l.use_svg_display()\nmnist_train = gluon.data.vision.FashionMNIST(train=True)\nmnist_test = gluon.data.vision.FashionMNIST(train=False)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n 
for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.asnumpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = mnist_train[:18]\nshow_images(X.squeeze(axis=-1), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n return 0 if sys.platform.startswith('win') else 4\ntransformer = gluon.data.vision.transforms.ToTensor()\ntrain_iter = gluon.data.DataLoader(mnist_train.transform_first(transformer), batch_size, shuffle=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n dataset = gluon.data.vision\n trans = [dataset.transforms.ToTensor()]\n if resize:\n trans.insert(0, dataset.transforms.Resize(resize))\n trans = dataset.transforms.Compose(trans)\n mnist_train = dataset.FashionMNIST(train=True).transform_first(trans)\n mnist_test = dataset.FashionMNIST(train=False).transform_first(trans)\n return (gluon.data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n gluon.data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport sys\nimport paddle\nfrom paddle.vision import transforms\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\nmnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if paddle.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n return 4\ntrain_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers())\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\n mnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\n return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()),\n paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers()))"}}},{"rowIdx":58,"cells":{"id":{"kind":"number","value":59,"string":"59"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from IPython import display\nfrom mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = np.random.normal(0, 0.01, (num_inputs, num_outputs))\nb = np.zeros(num_outputs)\nW.attach_grad()\nb.attach_grad()\nX = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, 
keepdims=True), X.sum(1, keepdims=True)\ndef softmax(X):\n X_exp = np.exp(X)\n partition = X_exp.sum(1, keepdims=True)\n return X_exp / partition\nX = np.random.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(np.dot(X.reshape((-1, W.shape[0])), W) + b)\ny = np.array([0, 2])\ny_hat = np.array([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - np.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n metric = Accumulator(2)\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n metric = Accumulator(3)\n if isinstance(updater, gluon.Trainer):\n updater = updater.step\n for X, y in train_iter:\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.size)\n return metric[0] / metric[2], metric[1] / metric[2]"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom IPython import display\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))\nb = paddle.zeros(shape=(num_outputs,))\nW.stop_gradient=False\nb.stop_gradient=False\nX = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = paddle.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = paddle.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = paddle.to_tensor([0, 2])\ny_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n if len(y_hat.shape) < len(y.shape):\n cmp = y_hat.astype(y.dtype) == y.squeeze()\n else:\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, paddle.nn.Layer):\n net.eval()\n metric = Accumulator(2)\n with paddle.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, paddle.nn.Layer):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]"}}},{"rowIdx":59,"cells":{"id":{"kind":"number","value":60,"string":"60"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nfrom 
d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential()\nnet.add(nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.Normal(m.weight, std=0.01)\nnet.apply(init_weights);\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())"}}},{"rowIdx":60,"cells":{"id":{"kind":"number","value":61,"string":"61"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"%matplotlib inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.relu(x)\nd2l.plot(x, y, 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\nwith autograd.record():\n y = npx.sigmoid(x)\nd2l.plot(x, y, 'x', 'sigmoid(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\nwith autograd.record():\n y = np.tanh(x)\nd2l.plot(x, y, 'x', 'tanh(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(-8.0, 8.0, 0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.relu(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = paddle.nn.functional.sigmoid(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = paddle.tanh(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))"}}},{"rowIdx":61,"cells":{"id":{"kind":"number","value":62,"string":"62"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens))\nb1 = np.zeros(num_hiddens)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens, num_outputs))\nb2 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2]\nfor param in params:\n param.attach_grad()\ndef relu(X):\n return np.maximum(X, 0)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(np.dot(X, W1) + b1)\n return np.dot(H, W2) + b2\nloss = 
gluon.loss.SoftmaxCrossEntropyLoss()\nnum_epochs, lr = 10, 0.1\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = paddle.randn([num_inputs, num_hiddens]) * 0.01\nW1.stop_gradient = False\nb1 = paddle.zeros([num_hiddens])\nb1.stop_gradient = False\nW2 = paddle.randn([num_hiddens, num_outputs]) * 0.01\nW2.stop_gradient = False\nb2 = paddle.zeros([num_outputs])\nb2.stop_gradient = False\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = paddle.zeros_like(X)\n return paddle.maximum(X, a)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(X@W1 + b1)\n return (H@W2 + b2)\nloss = nn.CrossEntropyLoss(reduction='none')\nnum_epochs, lr = 10, 0.1\nupdater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)"}}},{"rowIdx":62,"cells":{"id":{"kind":"number","value":63,"string":"63"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'), nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Linear(256, 10))\nfor layer in net:\n if type(layer) == nn.Linear:\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.01))\n layer.weight_attr = weight_attr\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"}}},{"rowIdx":63,"cells":{"id":{"kind":"number","value":64,"string":"64"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"import math\nfrom mxnet import gluon, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(l.sum(), d2l.size(l))\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = gluon.loss.L2Loss()\n net = nn.Sequential()\n net.add(nn.Dense(1, use_bias=False))\n net.initialize()\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)\n trainer = gluon.Trainer(net.collect_params(), 
'sgd', {'learning_rate': 0.01})\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500)"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport numpy as np\nimport paddle\nfrom paddle import nn\ntrue_w, features, poly_features, labels = [paddle.to_tensor(x, dtype=\n paddle.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n out = net(X)\n y = y.reshape(out.shape)\n l = loss(out, y)\n metric.add(l.sum(), l.numel())\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels,\n num_epochs=400):\n loss = nn.MSELoss()\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False)\n trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2],\n labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :],\n labels[:n_train], labels[n_train:], num_epochs=1500)"}}},{"rowIdx":64,"cells":{"id":{"kind":"number","value":65,"string":"65"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = np.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = np.random.normal(scale=1, size=(num_inputs, 1))\n b = np.zeros(1)\n w.attach_grad()\n b.attach_grad()\n return [w, b]\ndef l2_penalty(w):\n return (w**2).sum() / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), 
y) + lambd * l2_penalty(w)\n l.backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential()\n net.add(nn.Dense(1))\n net.initialize(init.Normal(sigma=1))\n loss = gluon.loss.L2Loss()\n num_epochs, lr = 100, 0.003\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr, 'wd': wd})\n net.collect_params('.*bias').setattr('wd_mult', 0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = paddle.normal(0, 1, shape=(num_inputs, 1))\n w.stop_gradient = False\n b = paddle.zeros(shape=[1])\n b.stop_gradient = False\n return [w, b]\ndef l2_penalty(w):\n return paddle.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr))\n loss = nn.MSELoss()\n num_epochs, lr = 100, 0.003\n trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n trainer.step()\n trainer.clear_grad()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"}}},{"rowIdx":65,"cells":{"id":{"kind":"number","value":66,"string":"66"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return np.zeros_like(X)\n if dropout == 0:\n return X\n mask = 
np.random.uniform(0, 1, X.shape) > dropout\n return mask.astype(np.float32) * X / (1.0 - dropout)\nX = np.arange(16).reshape(2, 8)\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens1))\nb1 = np.zeros(num_hiddens1)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens1, num_hiddens2))\nb2 = np.zeros(num_hiddens2)\nW3 = np.random.normal(scale=0.01, size=(num_hiddens2, num_outputs))\nb3 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2, W3, b3]\nfor param in params:\n param.attach_grad()\ndropout1, dropout2 = 0.2, 0.5\ndef net(X):\n X = X.reshape(-1, num_inputs)\n H1 = npx.relu(np.dot(X, W1) + b1)\n if autograd.is_training():\n H1 = dropout_layer(H1, dropout1)\n H2 = npx.relu(np.dot(H1, W2) + b2)\n if autograd.is_training():\n H2 = dropout_layer(H2, dropout2)\n return np.dot(H2, W3) + b3\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout1),\n nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout2),\n nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport random\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return paddle.zeros_like(X)\n if dropout == 0:\n return X\n mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')\n return mask * X / (1.0 - dropout)\nX= paddle.arange(16, dtype = paddle.float32).reshape((2, 8))\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Layer):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,\n is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256, weight_attr=weight_attr),\n nn.ReLU(),\n 
nn.Dropout(dropout2),\n nn.Linear(256, 10, weight_attr=weight_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"}}},{"rowIdx":66,"cells":{"id":{"kind":"number","value":67,"string":"67"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.sigmoid(x)\ny.backward()\nd2l.plot(x, [y, x.grad], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = np.random.normal(size=(4, 4))\nfor i in range(100):\n M = np.dot(M, np.random.normal(size=(4, 4)))"},"paddle":{"kind":"string","value":"trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.sigmoid(x)\ny.backward(paddle.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],\n legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = paddle.normal(0, 1, shape=(4,4))\nfor i in range(100):\n M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4)))"}}},{"rowIdx":67,"cells":{"id":{"kind":"number","value":68,"string":"68"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"%matplotlib inline\nimport pandas as pd\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train = train_data.shape[0]\ntrain_features = np.array(all_features[:n_train].values, dtype=np.float32)\ntest_features = np.array(all_features[n_train:].values, dtype=np.float32)\ntrain_labels = np.array(train_data.SalePrice.values.reshape(-1, 1), dtype=np.float32)\nloss = gluon.loss.L2Loss()\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(1))\n net.initialize()\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = np.clip(net(features), 1, float('inf'))\n return np.sqrt(2 * loss(np.log(clipped_preds), np.log(labels)).mean())\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': learning_rate, 'wd': weight_decay})\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = np.concatenate([X_train, 
X_part], 0)\n y_train = np.concatenate([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).asnumpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nimport numpy as np\nimport pandas as pd\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\nn_train = train_data.shape[0]\ntrain_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32)\ntest_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32)\ntrain_labels = paddle.to_tensor(\n train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32)\nloss = nn.MSELoss()\nin_features = train_features.shape[1]\ndef get_net():\n net = nn.Sequential(nn.Linear(in_features,1))\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = paddle.clip(net(features), 1, float('inf'))\n rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n optimizer.clear_grad()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = paddle.concat([X_train, X_part], 0)\n y_train = paddle.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).detach().numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', 
index=False)"}}},{"rowIdx":68,"cells":{"id":{"kind":"number","value":69,"string":"69"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nnet(X)\nclass MLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.hidden = nn.Dense(256, activation='relu')\n self.out = nn.Dense(10)\n def forward(self, X):\n return self.out(self.hidden(X))\nnet = MLP()\nnet.initialize()\nnet(X)\nclass MySequential(nn.Block):\n def add(self, block):\n\n self._children[block.name] = block\n def forward(self, X):\n for block in self._children.values():\n X = block(X)\n return X\nnet = MySequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nnet(X)\nclass FixedHiddenMLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.rand_weight = self.params.get_constant('rand_weight', np.random.uniform(size=(20, 20)))\n self.dense = nn.Dense(20, activation='relu')\n def forward(self, X):\n X = self.dense(X)\n X = npx.relu(np.dot(X, self.rand_weight.data()) + 1)\n X = self.dense(X)\n while np.abs(X).sum() > 1:\n X /= 2\n return X.sum()\nnet = FixedHiddenMLP()\nnet.initialize()\nnet(X)\nclass NestMLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.net = nn.Sequential()\n self.net.add(nn.Dense(64, activation='relu'), nn.Dense(32, activation='relu'))\n self.dense = nn.Dense(16, activation='relu')\n def forward(self, X):\n return self.dense(self.net(X))\nchimera = nn.Sequential()\nchimera.add(NestMLP(), nn.Dense(20), FixedHiddenMLP())\nchimera.initialize()\nchimera(X)"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = paddle.rand([2, 20])\nnet(X)\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nnet = MLP()\nnet(X)\nclass MySequential(nn.Layer):\n def __init__(self, *layers):\n super(MySequential, self).__init__()\n if len(layers) > 0 and isinstance(layers[0], tuple):\n for name, layer in layers:\n self.add_sublayer(name, layer)\n else:\n for idx, layer in enumerate(layers):\n self.add_sublayer(str(idx), layer)\n def forward(self, X):\n for layer in self._sub_layers.values():\n X = layer(X)\n return X\nnet = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nnet(X)\nclass FixedHiddenMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.rand_weight = paddle.rand([20, 20])\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nnet = FixedHiddenMLP()\nnet(X)\nclass NestMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),\n nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), 
FixedHiddenMLP())\nchimera(X)"}}},{"rowIdx":69,"cells":{"id":{"kind":"number","value":70,"string":"70"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import init, np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(8, activation='relu'))\nnet.add(nn.Dense(1))\nnet.initialize()\nX = np.random.uniform(size=(2, 4))\nnet(X)\nnet.collect_params()['dense1_bias'].data()\ndef block1():\n net = nn.Sequential()\n net.add(nn.Dense(32, activation='relu'))\n net.add(nn.Dense(16, activation='relu'))\n return net\ndef block2():\n net = nn.Sequential()\n for _ in range(4):\n net.add(block1())\n return net\nrgnet = nn.Sequential()\nrgnet.add(block2())\nrgnet.add(nn.Dense(10))\nrgnet.initialize()\nrgnet(X)\nnet.initialize(init=init.Normal(sigma=0.01), force_reinit=True)\nnet[0].weight.data()[0]\nnet.initialize(init=init.Constant(1), force_reinit=True)\nnet[0].weight.data()[0]\nnet[0].weight.initialize(init=init.Xavier(), force_reinit=True)\nnet[1].initialize(init=init.Constant(42), force_reinit=True)\nclass MyInit(init.Initializer):\n def _init_weight(self, name, data):\n data[:] = np.random.uniform(-10, 10, data.shape)\n data *= np.abs(data) >= 5\nnet.initialize(MyInit(), force_reinit=True)\nnet[0].weight.data()[:2]\nnet[0].weight.data()[:] += 1\nnet[0].weight.data()[0, 0] = 42\nnet[0].weight.data()[0]\nlayer = CenteredLayer()\nlayer(np.array([1, 2, 3, 4, 5]))\nnet = nn.Sequential()\nnet.add(nn.Dense(128), CenteredLayer())\nnet.initialize()"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action=\"ignore\")\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = paddle.rand([2, 4])\nnet(X)\nnet.state_dict()['2.bias']\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_sublayer(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Normal(mean=0.0, std=0.01)\n paddle.zeros(m.bias)\nnet.apply(init_normal)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef init_constant(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(value = 1)\n paddle.zeros(m.bias)\nnet.apply(init_constant)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef xavier(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.XavierUniform(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(42)\nnet[0].apply(xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n print(\"Init\", *[(name, param.shape) for name, param in m.named_parameters()][0])\n paddle.nn.initializer.XavierUniform(m.weight, -10, 10)\n h = paddle.abs(m.weight) >= 5\n h = paddle.to_tensor(h)\n m = paddle.to_tensor(m.weight)\n m *= h\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.set_value(net[0].weight.numpy() + 1)\nval = net[0].weight.numpy()\nval[0, 0] = 42\nnet[0].weight.set_value(val)\nnet[0].weight[0]\nlayer = CenteredLayer()\nlayer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))\nnet = nn.Sequential(nn.Linear(8, 128), CenteredLayer())"}}},{"rowIdx":70,"cells":{"id":{"kind":"number","value":71,"string":"71"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nclass 
CenteredLayer(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n def forward(self, X):\n return X - X.mean()\nY = net(np.random.uniform(size=(4, 8)))\nY.mean()\nclass MyDense(nn.Block):\n def __init__(self, units, in_units, **kwargs):\n super().__init__(**kwargs)\n self.weight = self.params.get('weight', shape=(in_units, units))\n self.bias = self.params.get('bias', shape=(units,))\n def forward(self, x):\n linear = np.dot(x, self.weight.data(ctx=x.ctx)) + self.bias.data(\n ctx=x.ctx)\n return npx.relu(linear)\ndense = MyDense(units=3, in_units=5)\ndense.params\ndense.initialize()\ndense(np.random.uniform(size=(2, 5)))\nnet = nn.Sequential()\nnet.add(MyDense(8, in_units=64), MyDense(1, in_units=8))\nnet.initialize()\nnet(np.random.uniform(size=(2, 64)))"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nclass CenteredLayer(nn.Layer):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(paddle.rand([4, 8]))\nY.mean()\nclass MyLinear(nn.Layer):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32')\n self.bias = paddle.create_parameter(shape=(units,), dtype='float32')\n def forward(self, X):\n linear = paddle.matmul(X, self.weight) + self.bias\n return F.relu(linear)\nlinear = MyLinear(5, 3)\nlinear.weight\nlinear(paddle.randn([2, 5]))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(paddle.rand([2, 64]))"}}},{"rowIdx":71,"cells":{"id":{"kind":"number","value":72,"string":"72"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nx = np.arange(4)\nnpx.save('x-file', x)\nx2 = npx.load('x-file')\ny = np.zeros(4)\nnpx.save('x-files', [x, y])\nx2, y2 = npx.load('x-files')\nmydict = {'x': x, 'y': y}\nnpx.save('mydict', mydict)\nmydict2 = npx.load('mydict')\nclass MLP(nn.Block):\n def __init__(self, **kwargs):\n super(MLP, self).__init__(**kwargs)\n self.hidden = nn.Dense(256, activation='relu')\n self.output = nn.Dense(10)\n def forward(self, x):\n return self.output(self.hidden(x))\nnet = MLP()\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nY = net(X)\nnet.save_parameters('mlp.params')\nclone = MLP()\nclone.load_parameters('mlp.params')"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nx = paddle.arange(4)\npaddle.save(x, 'x-file')\nx2 = paddle.load('x-file')\ny = paddle.zeros([4])\npaddle.save([x,y], 'x-file')\nx2, y2 = paddle.load('x-file')\nmydict = {'x': x, 'y': y}\npaddle.save(mydict, 'mydict')\nmydict2 = paddle.load('mydict')\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = paddle.randn(shape=[2, 20])\nY = net(X)\npaddle.save(net.state_dict(), 'mlp.pdparams')\nclone = 
MLP()\nclone.set_state_dict(paddle.load('mlp.pdparams'))\nclone.eval()"}}},{"rowIdx":72,"cells":{"id":{"kind":"number","value":73,"string":"73"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnpx.cpu(), npx.gpu(), npx.gpu(1)\nnpx.num_gpus()\ndef try_gpu(i=0):\n return npx.gpu(i) if npx.num_gpus() >= i + 1 else npx.cpu()\ndef try_all_gpus():\n devices = [npx.gpu(i) for i in range(npx.num_gpus())]\n return devices if devices else [npx.cpu()]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = np.array([1, 2, 3])\nx.ctx\nX = np.ones((2, 3), ctx=try_gpu())\nY = np.random.uniform(size=(2, 3), ctx=try_gpu(1))\nZ = X.copyto(try_gpu(1))\nZ.as_in_ctx(try_gpu(1)) is Z\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nnet.initialize(ctx=try_gpu())\nnet[0].weight.data().ctx"},"paddle":{"kind":"string","value":"import paddle\nfrom paddle import nn\npaddle.device.set_device(\"cpu\"), paddle.CUDAPlace(0), paddle.CUDAPlace(1)\npaddle.device.cuda.device_count()\ndef try_gpu(i=0):\n if paddle.device.cuda.device_count() >= i + 1:\n return paddle.CUDAPlace(i)\n return paddle.CPUPlace()\ndef try_all_gpus():\n devices = [paddle.CUDAPlace(i) for i in range(paddle.device.cuda.device_count())]\n return devices if devices else paddle.CPUPlace()\ntry_gpu(),try_gpu(10),try_all_gpus()\nx = paddle.to_tensor([1, 2, 3])\nx.place\nX = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu())\nY = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1))\nZ = X.cuda(1)\nZ.cuda(1) is Z\nnet = nn.Sequential(nn.Linear(3, 1))\nnet=net.to(try_gpu())\nnet[0].weight.place"}}},{"rowIdx":73,"cells":{"id":{"kind":"number","value":74,"string":"74"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import autograd, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d(X, K):\n h, w = K.shape\n Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = np.array([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Block):\n def __init__(self, kernel_size, **kwargs):\n super().__init__(**kwargs)\n self.weight = self.params.get('weight', shape=kernel_size)\n self.bias = self.params.get('bias', shape=(1,))\n def forward(self, x):\n return corr2d(x, self.weight.data()) + self.bias.data()\nX = np.ones((6, 8))\nX[:, 2:6] = 0\nK = np.array([[1.0, -1.0]])\ncorr2d(d2l.transpose(X), K)\nconv2d = nn.Conv2D(1, kernel_size=(1, 2), use_bias=False)\nconv2d.initialize()\n\nX = X.reshape(1, 1, 6, 8)\nY = Y.reshape(1, 1, 6, 7)\nlr = 3e-2\nfor i in range(10):\n with autograd.record():\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n l.backward()\n conv2d.weight.data()[:] -= lr * conv2d.weight.grad()\nconv2d.weight.data().reshape((1, 2))"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef corr2d(X, K):\n h, w = K.shape\n Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Layer):\n def __init__(self, kernel_size):\n 
super().__init__()\n self.weight = paddle.ParamAttr(paddle.rand(kernel_size))\n self.bias = paddle.ParamAttr(paddle.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = paddle.ones((6, 8))\nX[:, 2:6] = 0\nK = paddle.to_tensor([[1.0, -1.0]])\ncorr2d(X.t(), K)\nconv2d = nn.Conv2D(1, 1, kernel_size=(1, 2))\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.clear_gradients()\n l.sum().backward()\n with paddle.no_grad():\n conv2d.weight[:] -= lr * conv2d.weight.grad\nconv2d.weight.reshape((1, 2))"}}},{"rowIdx":74,"cells":{"id":{"kind":"number","value":75,"string":"75"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\ndef comp_conv2d(conv2d, X):\n conv2d.initialize()\n X = X.reshape((1, 1) + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1)\nX = np.random.uniform(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1, strides=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=(3, 5), padding=(0, 1), strides=(3, 4))\ncomp_conv2d(conv2d, X).shape"},"paddle":{"kind":"string","value":"import warnings\nwarnings.filterwarnings(action="https://huggingface.co/datasets/Owos/CodeTransOcean-copy/viewer/dl_trans/ignore")\nimport paddle\nfrom paddle import nn\n\ndef comp_conv2d(conv2d, X):\n X = paddle.reshape(X, [1, 1] + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1)\nX = paddle.rand((8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape"}}},{"rowIdx":75,"cells":{"id":{"kind":"number","value":76,"string":"76"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = np.array([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return np.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = np.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = np.dot(K, X)\n return Y.reshape((c_o, h, w))\nX = np.random.normal(0, 1, (3, 3, 3))\nK = np.random.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(np.abs(Y1 - Y2).sum()) < 1e-6"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = 
paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return paddle.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = paddle.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = paddle.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = paddle.normal(0, 1, (3, 3, 3))\nK = paddle.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(paddle.abs(Y1 - Y2).sum()) < 1e-6"}}},{"rowIdx":76,"cells":{"id":{"kind":"number","value":77,"string":"77"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = np.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), strides=(2, 3))\npool2d(X)\nX = np.concatenate((X, X + 1), 1)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = paddle.arange(16, dtype=\"float32\").reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3, stride=3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3))\npool2d(X)\nX = paddle.concat((X, X + 1), 1)\npool2d = paddle.nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)"}}},{"rowIdx":77,"cells":{"id":{"kind":"number","value":78,"string":"78"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Conv2D(channels=6, kernel_size=5, padding=2, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120, activation='sigmoid'),\n nn.Dense(84, activation='sigmoid'),\n nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 28, 28))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n net.initialize(force_reinit=True, ctx=device, init=init.Xavier())\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n trainer = gluon.Trainer(net.collect_params(), 'sgd', 
{'learning_rate': lr})\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n trainer.step(X.shape[0])\n metric.add(l.sum(), d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn, optimizer\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = paddle.rand((1, 1, 28, 28), 'float32')\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2D:\n nn.initializer.XavierUniform(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.clear_grad()\n X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with paddle.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))"}}},{"rowIdx":78,"cells":{"id":{"kind":"number","value":79,"string":"79"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(\n nn.Conv2D(96, kernel_size=11, strides=4, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(256, kernel_size=5, padding=2, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n 
nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nnet = nn.Sequential(\n nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)"}}},{"rowIdx":79,"cells":{"id":{"kind":"number","value":80,"string":"80"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef vgg_block(num_convs, num_channels):\n blk = nn.Sequential()\n for _ in range(num_convs):\n blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1, activation='relu'))\n blk.add(nn.MaxPool2D(pool_size=2, strides=2))\n return blk\ndef vgg(conv_arch):\n net = nn.Sequential()\n for (num_convs, num_channels) in conv_arch:\n net.add(vgg_block(num_convs, num_channels))\n net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(10))\n return net\nnet = vgg(conv_arch)\nnet.initialize()\nX = np.random.uniform(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.name, 'output shape:\t', X.shape)"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2D(kernel_size=2, stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(*conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)"}}},{"rowIdx":80,"cells":{"id":{"kind":"number","value":81,"string":"81"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef nin_block(num_channels, kernel_size, strides, padding):\n blk = nn.Sequential()\n blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, 
activation='relu'))\n return blk\nnet = nn.Sequential()\nnet.add(nin_block(96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Dropout(0.5),\n nin_block(10, kernel_size=3, strides=1, padding=1),\n nn.GlobalAvgPool2D(),\n nn.Flatten())\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(3, stride=2), nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"}}},{"rowIdx":81,"cells":{"id":{"kind":"number","value":82,"string":"82"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Inception(nn.Block):\n def __init__(self, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu')\n self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu')\n self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1, activation='relu')\n self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu')\n self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2, activation='relu')\n self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1)\n self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu')\n def forward(self, x):\n p1 = self.p1_1(x)\n p2 = self.p2_2(self.p2_1(x))\n p3 = self.p3_2(self.p3_1(x))\n p4 = self.p4_2(self.p4_1(x))\n return np.concatenate((p1, p2, p3, p4), axis=1)\nb1 = nn.Sequential()\nb1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb2 = nn.Sequential()\nb2.add(nn.Conv2D(64, kernel_size=1, activation='relu'),\n nn.Conv2D(192, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb3 = nn.Sequential()\nb3.add(Inception(64, (96, 128), (16, 32), 32),\n Inception(128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb4 = nn.Sequential()\nb4.add(Inception(192, (96, 208), (16, 48), 64),\n Inception(160, (112, 224), (24, 64), 64),\n Inception(128, (128, 256), (24, 64), 64),\n Inception(112, (144, 288), (32, 64), 64),\n Inception(256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb5 = nn.Sequential()\nb5.add(Inception(256, (160, 320), (32, 128), 128),\n 
Inception(384, (192, 384), (48, 128), 128),\n nn.GlobalAvgPool2D())\nnet = nn.Sequential()\nnet.add(b1, b2, b3, b4, b5, nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 96, 96))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nclass Inception(nn.Layer):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2D(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2D(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return paddle.concat(x=[p1, p2, p3, p4], axis=1)\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2,padding=1))\nb2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = paddle.rand(shape=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"}}},{"rowIdx":82,"cells":{"id":{"kind":"number","value":83,"string":"83"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import autograd, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not autograd.is_training():\n X_hat = (X - moving_mean) / np.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(axis=0)\n var = ((X - mean) ** 2).mean(axis=0)\n else:\n mean = X.mean(axis=(0, 2, 3), keepdims=True)\n var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)\n X_hat = (X - mean) / np.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Block):\n def __init__(self, num_features, num_dims, **kwargs):\n super().__init__(**kwargs)\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 
1)\n self.gamma = self.params.get('gamma', shape=shape, init=init.One())\n self.beta = self.params.get('beta', shape=shape, init=init.Zero())\n self.moving_mean = np.zeros(shape)\n self.moving_var = np.ones(shape)\n def forward(self, X):\n if self.moving_mean.ctx != X.ctx:\n self.moving_mean = self.moving_mean.copyto(X.ctx)\n self.moving_var = self.moving_var.copyto(X.ctx)\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma.data(), self.beta.data(), self.moving_mean,\n self.moving_var, eps=1e-12, momentum=0.9)\n return Y\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n BatchNorm(6, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n BatchNorm(16, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n BatchNorm(120, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n BatchNorm(84, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(10))\nnet[1].gamma.data().reshape(-1,), net[1].beta.data().reshape(-1,)\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(10))"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True):\n if not is_training:\n X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = paddle.mean(X)\n var = paddle.mean(((X - mean) ** 2))\n else:\n mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)\n var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / (var + eps) ** 0.5\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Layer):\n def __init__(self, num_features, num_dims=4):\n super(BatchNorm, self).__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32')))\n self.beta = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32')))\n self.moving_mean = paddle.zeros(shape=shape, dtype='float32')\n self.moving_var = paddle.zeros(shape=shape, dtype='float32')\n def forward(self, X):\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training)\n return Y\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n 
nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nparam = net.parameters()\nprint('gamma:', param[2].numpy().reshape(-1))\nprint('beta:', param[3].numpy().reshape(-1))\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(),\n nn.Linear(84, 10))"}}},{"rowIdx":83,"cells":{"id":{"kind":"number","value":84,"string":"84"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Residual(nn.Block):\n def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):\n super().__init__(**kwargs)\n self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides)\n self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm()\n self.bn2 = nn.BatchNorm()\n def forward(self, X):\n Y = npx.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n return npx.relu(Y + X)\nblk = Residual(3)\nblk.initialize()\nX = np.random.uniform(size=(4, 3, 6, 6))\nblk(X).shape\nblk = Residual(6, use_1x1conv=True, strides=2)\nblk.initialize()\nblk(X).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\ndef resnet_block(num_channels, num_residuals, first_block=False):\n blk = nn.Sequential()\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.add(Residual(num_channels, use_1x1conv=True, strides=2))\n else:\n blk.add(Residual(num_channels))\n return blk\nnet.add(resnet_block(64, 2, first_block=True),\n resnet_block(128, 2),\n resnet_block(256, 2),\n resnet_block(512, 2))\nnet.add(nn.GlobalAvgPool2D(), nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nfrom paddle.nn import functional as F\nclass Residual(nn.Layer):\n def __init__(self, input_channels, num_channels, use_1x1conv=False,\n strides=1):\n super(Residual, self).__init__()\n self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2D(num_channels)\n self.bn2 = nn.BatchNorm2D(num_channels)\n self.relu = nn.ReLU()\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3, 3)\nX = paddle.rand([4, 3, 6, 6])\nY = blk(X)\nY.shape\nblk = Residual(3, 6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2D(1, 
64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\ndef resnet_block(input_channels, num_channels, num_residuals, first_block=False):\n blk = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))\n else:\n blk.append(Residual(num_channels, num_channels))\n return blk\nb2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))\nb3 = nn.Sequential(*resnet_block(64, 128, 2))\nb4 = nn.Sequential(*resnet_block(128, 256, 2))\nb5 = nn.Sequential(*resnet_block(256, 512, 2))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"}}},{"rowIdx":84,"cells":{"id":{"kind":"number","value":85,"string":"85"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef conv_block(num_channels):\n blk = nn.Sequential()\n blk.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=3, padding=1))\n return blk\nclass DenseBlock(nn.Block):\n def __init__(self, num_convs, num_channels, **kwargs):\n super().__init__(**kwargs)\n self.net = nn.Sequential()\n for _ in range(num_convs):\n self.net.add(conv_block(num_channels))\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = np.concatenate((X, Y), axis=1)\n return X\nblk = DenseBlock(2, 10)\nblk.initialize()\nX = np.random.uniform(size=(4, 3, 8, 8))\nY = blk(X)\nY.shape\ndef transition_block(num_channels):\n blk = nn.Sequential()\n blk.add(nn.BatchNorm(), nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=1),\n nn.AvgPool2D(pool_size=2, strides=2))\n return blk\nblk = transition_block(10)\nblk.initialize()\nblk(Y).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n net.add(DenseBlock(num_convs, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n num_channels //= 2\n net.add(transition_block(num_channels))\nnet.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.GlobalAvgPool2D(),\n nn.Dense(10))"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Layer):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = paddle.concat(x=[X, Y], axis=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = paddle.randn([4, 3, 8, 8])\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), 
nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2D(kernel_size=2, stride=2))\nblk = transition_block(23, 10)\nblk(Y).shape\nb1 = nn.Sequential(\n nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nblks = []\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n blks.append(DenseBlock(num_convs, num_channels, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n blks.append(transition_block(num_channels, num_channels // 2))\n num_channels = num_channels // 2\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2D(num_channels), nn.ReLU(),\n nn.AdaptiveMaxPool2D((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))"}}},{"rowIdx":85,"cells":{"id":{"kind":"number","value":86,"string":"86"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nT = 1000\ntime = np.arange(1, T + 1, dtype=np.float32)\nx = np.sin(0.01 * time) + np.random.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = np.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(10, activation='relu'),\n nn.Dense(1))\n net.initialize(init.Xavier())\n return net\nloss = gluon.loss.L2Loss()\ndef train(net, train_iter, loss, epochs, lr):\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr})\n for epoch in range(epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.asnumpy(), onestep_preds.asnumpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = np.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.asnumpy(), onestep_preds.asnumpy(),\n multistep_preds[n_train + tau:].asnumpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = np.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].asnumpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nT = 1000\ntime = paddle.arange(1, T + 1, dtype=paddle.float32)\nx = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,))\nd2l.plot(time, 
[x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = paddle.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.XavierUniform(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters())\n for epoch in range(epochs):\n for i,(X, y) in enumerate (train_iter()):\n trainer.clear_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = paddle.zeros([T])\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = paddle.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape([-1])\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"}}},{"rowIdx":86,"cells":{"id":{"kind":"number","value":87,"string":"87"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"import collections\nimport re\nfrom d2l import mxnet as d2l"},"paddle":{"kind":"string","value":"import collections\nimport re\nfrom d2l import paddle as d2l"}}},{"rowIdx":87,"cells":{"id":{"kind":"number","value":88,"string":"88"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"import random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield np.array(X), np.array(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = 
((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = np.array(corpus[offset: offset + num_tokens])\n Ys = np.array(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield paddle.to_tensor(X), paddle.to_tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = paddle.to_tensor(corpus[offset: offset + num_tokens])\n Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"}}},{"rowIdx":88,"cells":{"id":{"kind":"number","value":89,"string":"89"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nX, W_xh = np.random.normal(0, 1, (3, 1)), np.random.normal(0, 1, (1, 4))\nH, W_hh = np.random.normal(0, 1, (3, 4)), np.random.normal(0, 1, (4, 4))\nnp.dot(X, W_xh) + np.dot(H, W_hh)\nnp.dot(np.concatenate((X, H), 1), np.concatenate((W_xh, W_hh), 0))"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nX, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4))\nH, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4))\npaddle.matmul(X, W_xh) + paddle.matmul(H, W_hh)\npaddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))"}}},{"rowIdx":89,"cells":{"id":{"kind":"number","value":90,"string":"90"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"%matplotlib inline\nimport math\nfrom mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnpx.one_hot(np.array([0, 2]), len(vocab))\nX = np.arange(10).reshape((2, 5))\nnpx.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n W_xh = normal((num_inputs, num_hiddens))\n W_hh = normal((num_hiddens, num_hiddens))\n b_h = np.zeros(num_hiddens, 
ctx=device)\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_rnn_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = np.tanh(np.dot(X, W_xh) + np.dot(H, W_hh) + b_h)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens, device)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = npx.one_hot(X.T, self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size, ctx):\n return self.init_state(batch_size, self.num_hiddens, ctx)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0], d2l.try_gpu())\nY, new_state = net(X.as_in_context(d2l.try_gpu()), state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1, ctx=device)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: np.array([outputs[-1]], ctx=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.argmax(axis=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, gluon.Block):\n params = [p.data() for p in net.collect_params().values()]\n else:\n params = net.params\n norm = math.sqrt(sum((p.grad ** 2).sum() for p in params))\n if norm > theta:\n for param in params:\n param.grad[:] *= theta / norm\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], ctx=device)\n else:\n for s in state:\n s.detach()\n y = Y.T.reshape(-1)\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * d2l.size(y), d2l.size(y))\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, gluon.Block):\n net.initialize(ctx=device, force_reinit=True, init=init.Normal(0.01))\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n updater = lambda batch_size: trainer.step(batch_size)\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 
0:\n animator.add(epoch + 1, [ppl])\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)"},"paddle":{"kind":"string","value":"%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nF.one_hot(paddle.to_tensor([0, 2]), len(vocab))\nX = paddle.arange(10).reshape((2, 5))\nF.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)* 0.01\n W_xh = normal([num_inputs, num_hiddens])\n W_hh = normal([num_hiddens, num_hiddens])\n b_h = paddle.zeros(shape=[num_hiddens])\n W_hq = normal([num_hiddens, num_outputs])\n b_q = paddle.zeros(shape=[num_outputs])\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient=False\n return params\ndef init_rnn_state(batch_size, num_hiddens):\n return (paddle.zeros(shape=[batch_size, num_hiddens]), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = paddle.tanh(paddle.mm(X, W_xh) + paddle.mm(H, W_hh) + b_h)\n Y = paddle.mm(H, W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(x=outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = F.one_hot(X.T, self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size):\n return self.init_state(batch_size, self.num_hiddens)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0])\nY, new_state = net(X, state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: paddle.to_tensor(outputs[-1], place=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(paddle.reshape(paddle.argmax(y,axis=1),shape=[1])))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, nn.Layer):\n params = [p for p in net.parameters() if not p.stop_gradient]\n else:\n params = net.params\n norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params))\n if norm > theta:\n with paddle.no_grad():\n for param in params:\n param.grad.set_value(param.grad * theta / norm)\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0])\n else:\n if isinstance(net, nn.Layer) and not isinstance(state, tuple):\n state.stop_gradient=True\n else:\n for s in state:\n s.stop_gradient=True\n y = paddle.reshape(Y.T,shape=[-1])\n X = paddle.to_tensor(X, place=device)\n y = 
paddle.to_tensor(y, place=device)\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.backward()\n grad_clipping(net, 1)\n updater.step()\n else:\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * y.numel(), y.numel())\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, nn.Layer):\n updater = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)"}}},{"rowIdx":90,"cells":{"id":{"kind":"number","value":91,"string":"91"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import nn, rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = rnn.RNN(num_hiddens)\nrnn_layer.initialize()\nstate = rnn_layer.begin_state(batch_size=batch_size)\nlen(state), state[0].shape\nX = np.random.uniform(size=(num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, len(state_new), state_new[0].shape\nclass RNNModel(nn.Block):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.dense = nn.Dense(vocab_size)\n def forward(self, inputs, state):\n X = npx.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.dense(Y.reshape(-1, Y.shape[-1]))\n return output, state\n def begin_state(self, *args, **kwargs):\n return self.rnn.begin_state(*args, **kwargs)\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, len(vocab))\nnet.initialize(force_reinit=True, ctx=device)\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True)\nstate = paddle.zeros(shape=[1, batch_size, num_hiddens])\nstate.shape\nX = paddle.rand(shape=[num_steps, batch_size, len(vocab)])\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if self.rnn.num_directions==1:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n 
else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens])\n else:\n return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]),\n paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1.0\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)"}}},{"rowIdx":91,"cells":{"id":{"kind":"number","value":92,"string":"92"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_gru_state(batch_size, num_hiddens, device):\n return (np.zeros(shape=(batch_size, num_hiddens), ctx=device), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = npx.sigmoid(np.dot(X, W_xz) + np.dot(H, W_hz) + b_z)\n R = npx.sigmoid(np.dot(X, W_xr) + np.dot(H, W_hr) + b_r)\n H_tilda = np.tanh(np.dot(X, W_xh) + np.dot(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\ngru_layer = rnn.GRU(num_hiddens)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = 
paddle.zeros([num_outputs])\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H,*_ = state\n outputs = []\n for X in inputs:\n Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H,*_)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"}}},{"rowIdx":92,"cells":{"id":{"kind":"number","value":93,"string":"93"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_lstm_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), np.zeros((batch_size, num_hiddens), ctx=device))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = npx.sigmoid(np.dot(X, W_xi) + np.dot(H, W_hi) + b_i)\n F = npx.sigmoid(np.dot(X, W_xf) + np.dot(H, W_hf) + b_f)\n O = npx.sigmoid(np.dot(X, W_xo) + np.dot(H, W_ho) + b_o)\n C_tilda = np.tanh(np.dot(X, W_xc) + np.dot(H, W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * np.tanh(C)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nlstm_layer = rnn.LSTM(num_hiddens)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as Function\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = 
d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), paddle.zeros([batch_size, num_hiddens]))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * paddle.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"}}},{"rowIdx":93,"cells":{"id":{"kind":"number","value":94,"string":"94"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"null"},"mxnet":{"kind":"string","value":"import os\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['']] for l in lines]\n array = np.array([truncate_pad(l, num_steps, vocab['']) for l in lines])\n valid_len = (array != vocab['']).astype(np.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(np.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(np.int32))\n print('Valid length of Y:', Y_valid_len)\n break"},"paddle":{"kind":"string","value":"import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport os\nimport paddle\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['']] for l in lines]\n array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['']) for l in lines])\n valid_len = (array != vocab['']).astype(paddle.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(paddle.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y..astype(paddle.int32))\n print('Valid length of Y:', Y_valid_len)\n break"}}},{"rowIdx":94,"cells":{"id":{"kind":"number","value":95,"string":"95"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"x = torch.arange(12)\nx.numel()\ntorch.zeros((2, 
3, 4))\ntorch.ones((2, 3, 4))\ntorch.randn(3, 4)\ntorch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = torch.tensor([1.0, 2, 4, 8])\ny = torch.tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\ntorch.exp(x)\nX = torch.arange(12, dtype=torch.float32).reshape((3,4))\nY = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\ntorch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1)\na = torch.arange(3).reshape((3, 1))\nb = torch.arange(2).reshape((1, 2))\nZ = torch.zeros_like(Y)\nZ[:] = X + Y\nA = X.numpy()\nB = torch.tensor(A)\na = torch.tensor([3.5])\nprint(a, a.item(), float(a), int(a))"},"mxnet":{"kind":"string","value":"x = np.arange(12)\nx.size\nnp.zeros((2, 3, 4))\nnp.ones((2, 3, 4))\nnp.random.normal(0, 1, size=(3, 4))\nnp.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = np.array([1, 2, 4, 8])\ny = np.array([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\nnp.exp(x)\nX = np.arange(12).reshape(3, 4)\nY = np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nnp.concatenate([X, Y], axis=0), np.concatenate([X, Y], axis=1)\na = np.arange(3).reshape(3, 1)\nb = np.arange(2).reshape(1, 2)\nZ = np.zeros_like(Y)\nZ[:] = X + Y\nA = X.asnumpy()\nB = np.array(A)\na = np.array([3.5])\nprint(a, a.item(), float(a), int(a))"},"paddle":{"kind":"null"}}},{"rowIdx":95,"cells":{"id":{"kind":"number","value":96,"string":"96"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nX, y = torch.tensor(inputs.values), torch.tensor(outputs.values)"},"mxnet":{"kind":"string","value":"from mxnet import np\nX, y = np.array(inputs.values), np.array(outputs.values)"},"paddle":{"kind":"null"}}},{"rowIdx":96,"cells":{"id":{"kind":"number","value":97,"string":"97"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nx = torch.tensor(3.0)\ny = torch.tensor(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = torch.arange(4)\nA = torch.arange(20).reshape(5, 4)\nB = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nX = torch.arange(24).reshape(2, 3, 4)\nA = torch.arange(20, dtype=torch.float32).reshape(5, 4)\nB = A.clone()\nprint(A, A + B)\na = 2\nX = torch.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = torch.arange(4, dtype=torch.float32)\nprint(x, x.sum())\nA.mean()\nA.sum() / A.numel()\ny = torch.ones(4, dtype = torch.float32)\nprint(torch.dot(x, y))\ntorch.sum(x * y)\nA.shape, x.shape, torch.mv(A, x)\nB = torch.ones(4, 3)\ntorch.mm(A, B)\nu = torch.tensor([3.0, -4.0])\ntorch.norm(u)\ntorch.abs(u).sum()\ntorch.norm(torch.ones((4, 9)))"},"mxnet":{"kind":"string","value":"from mxnet import np, npx\nnpx.set_np()\nx = np.array(3.0)\ny = np.array(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = np.arange(4)\nA = np.arange(20).reshape(5, 4)\nB = np.array([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nX = np.arange(24).reshape(2, 3, 4)\nA = np.arange(20).reshape(5, 4)\nB = A.copy()\nprint(A, A + B)\na = 2\nX = np.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = np.arange(4)\nprint(x, x.sum())\nA.mean()\nA.sum() / A.size\ny = np.ones(4)\nprint(np.dot(x, y))\nnp.sum(x * y)\nA.shape, x.shape, np.dot(A, x)\nB = np.ones(shape=(4, 3))\nnp.dot(A, B)\nu = np.array([3, -4])\nnp.linalg.norm(u)\nnp.abs(u).sum()\nnp.linalg.norm(np.ones((4, 9)))"},"paddle":{"kind":"null"}}},{"rowIdx":97,"cells":{"id":{"kind":"number","value":98,"string":"98"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import torch as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * 
x"},"mxnet":{"kind":"string","value":"%matplotlib inline\nfrom matplotlib_inline import backend_inline\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef f(x):\n return 3 * x ** 2 - 4 * x"},"paddle":{"kind":"null"}}},{"rowIdx":98,"cells":{"id":{"kind":"number","value":99,"string":"99"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"import torch\nx = torch.arange(4.0)\nx.requires_grad_(True)\nx.grad\ny = 2 * torch.dot(x, x)\nx.grad.zero_()\ny = x.sum()\ny.backward()\nx.grad\nx.grad.zero_()\ny = x * x\ny.sum().backward()\nx.grad\nx.grad.zero_()\ny = x * x\nu = y.detach()\nz = u * x\nz.sum().backward()\nx.grad == u\nx.grad.zero_()\ny.sum().backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while b.norm() < 1000:\n b = b * 2\n if b.sum() > 0:\n c = b\n else:\n c = 100 * b\n return c\na = torch.randn(size=(), requires_grad=True)\nd = f(a)\nd.backward()"},"mxnet":{"kind":"string","value":"from mxnet import autograd, np, npx\nnpx.set_np()\nx = np.arange(4.0)\nx.attach_grad()\nx.grad\nwith autograd.record():\n y = 2 * np.dot(x, x)\nwith autograd.record():\n y = x.sum()\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\n u = y.detach()\n z = u * x\nz.backward()\nx.grad == u\ny.backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while np.linalg.norm(b) < 1000:\n b = b * 2\n if b.sum() > 0:\n c = b\n else:\n c = 100 * b\n return c\na = np.random.normal()\na.attach_grad()\nwith autograd.record():\n d = f(a)\nd.backward()"},"paddle":{"kind":"null"}}},{"rowIdx":99,"cells":{"id":{"kind":"number","value":100,"string":"100"},"tensorflow":{"kind":"null"},"pytorch":{"kind":"string","value":"%matplotlib inline\nimport torch\nfrom torch.distributions import multinomial\nfrom d2l import torch as d2l\nfair_probs = torch.ones([6]) / 6\nmultinomial.Multinomial(1, fair_probs).sample()\nmultinomial.Multinomial(10, fair_probs).sample()\ncounts = multinomial.Multinomial(1000, fair_probs).sample()"},"mxnet":{"kind":"string","value":"%matplotlib inline\nimport random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfair_probs = [1.0 / 6] * 6\nnp.random.multinomial(1, fair_probs)\nnp.random.multinomial(10, fair_probs)\ncounts = np.random.multinomial(1000, fair_probs).astype(np.float32)"},"paddle":{"kind":"null"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":0,"numItemsPerPage":100,"numTotalItems":564,"offset":0,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NTM0MzczNSwic3ViIjoiL2RhdGFzZXRzL093b3MvQ29kZVRyYW5zT2NlYW4tY29weSIsImV4cCI6MTc1NTM0NzMzNSwiaXNzIjoiaHR0cHM6Ly9odWdnaW5nZmFjZS5jbyJ9.Sk4okmPDLZiTvFh77WdgGD9fSojztQrhUqw9Hmczcp5Y92ulVmW-a8PAsKyv0Kzp9WurinoT-X7q54l5Z1_1Ag","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
Columns: id (int64, range 1-564), tensorflow (string, 52 distinct values), pytorch (string, 81 distinct values), mxnet (string, 66 distinct values), paddle (string, 73 distinct values)
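A minimal sketch of reading these rows programmatically instead of off the viewer, assuming the repo id "Owos/CodeTransOcean-copy" and config name "dl_trans" taken from the URLs embedded in this page and a "train" split (none of these names are confirmed here):

from datasets import load_dataset  # Hugging Face datasets library

# Load the dl_trans config of the dataset; the split name is an assumption.
ds = load_dataset("Owos/CodeTransOcean-copy", "dl_trans", split="train")
row = ds[0]
# Each row carries one code snippet per framework column; missing translations
# are None, which the viewer renders as "null".
for framework in ("tensorflow", "pytorch", "mxnet", "paddle"):
    snippet = row[framework]
    print(framework, "->", "null" if snippet is None else snippet.splitlines()[0])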
1
x = tf.range(12) tf.size(x) X = tf.reshape(x, (3, 4)) tf.zeros((2, 3, 4)) tf.ones((2, 3, 4)) tf.random.normal(shape=[3, 4]) tf.constant([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = tf.constant([1.0, 2, 4, 8]) y = tf.constant([2.0, 2, 2, 2]) x + y, x - y, x * y, x / y, x ** y tf.exp(x) X = tf.reshape(tf.range(12, dtype=tf.float32), (3, 4)) Y = tf.constant([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) tf.concat([X, Y], axis=0), tf.concat([X, Y], axis=1) tf.reduce_sum(X) a = tf.reshape(tf.range(3), (3, 1)) b = tf.reshape(tf.range(2), (1, 2)) X_var = tf.Variable(X) X_var[1, 2].assign(9) X_var = tf.Variable(X) X_var[0:2, :].assign(tf.ones(X_var[0:2,:].shape, dtype = tf.float32) * 12) Z = tf.Variable(tf.zeros_like(Y)) Z.assign(X + Y) @tf.function def computation(X, Y): Z = tf.zeros_like(Y) A = X + Y B = A + Y C = B + Y return C + Y computation(X, Y) A = X.numpy() B = tf.constant(A) a = tf.constant([3.5]).numpy() print(a, a.item(), float(a), int(a))
null
null
x = paddle.arange(12) x.numel() X = paddle.reshape(x, (3, 4)) paddle.zeros((2, 3, 4)) paddle.ones((2, 3, 4)) paddle.randn((3, 4),'float32') paddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = paddle.to_tensor([1.0, 2, 4, 8]) y = paddle.to_tensor([2, 2, 2, 2]) x + y, x - y, x * y, x / y, x**y paddle.exp(x) X = paddle.arange(12, dtype='float32').reshape((3, 4)) Y = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) paddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1) X.sum() a = paddle.reshape(paddle.arange(3), (3, 1)) b = paddle.reshape(paddle.arange(2), (1, 2)) X[1, 2] = 9 X[0:2, :] = 12 Z = paddle.zeros_like(Y) Z = X + Y before = id(X) X += Y id(X) == before A = X.numpy() B = paddle.to_tensor(A) type(A), type(B) a = paddle.to_tensor([3.5]) a, a.item(), float(a), int(a)
2
import tensorflow as tf X, y = tf.constant(inputs.values), tf.constant(outputs.values)
null
null
import warnings warnings.filterwarnings(action='ignore') import paddle X, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)
3
import tensorflow as tf x = tf.constant(3.0) y = tf.constant(2.0) print(x + y, x * y, x / y, x**y) x = tf.range(4) A = tf.reshape(tf.range(20), (5, 4)) tf.transpose(A) B = tf.constant([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) B == tf.transpose(B) X = tf.reshape(tf.range(24), (2, 3, 4)) A = tf.reshape(tf.range(20, dtype=tf.float32), (5, 4)) B = A print(A, A + B) a = 2 X = tf.reshape(tf.range(24), (2, 3, 4)) print(a + X, (a * X).shape) x = tf.range(4, dtype=tf.float32) print(x, tf.reduce_sum(x)) a = tf.reduce_sum(A) A_sum_axis0 = tf.reduce_sum(A, axis=0) A_sum_axis1 = tf.reduce_sum(A, axis=1) tf.reduce_sum(A, axis=[0, 1]) tf.reduce_mean(A) tf.reduce_sum(A) / tf.size(A).numpy() tf.reduce_mean(A, axis=0) tf.reduce_sum(A, axis=0) / A.shape[0] sum_A = tf.reduce_sum(A, axis=1, keepdims=True) tf.cumsum(A, axis=0) y = tf.ones(4, dtype=tf.float32) print(tf.tensordot(x, y, axes=1)) tf.reduce_sum(x * y) A.shape, x.shape, tf.linalg.matvec(A, x) B = tf.ones((4, 3), tf.float32) tf.matmul(A, B) u = tf.constant([3.0, -4.0]) tf.norm(u) tf.reduce_sum(tf.abs(u)) tf.norm(tf.ones((4, 9)))
null
null
import warnings warnings.filterwarnings(action='ignore') import paddle x = paddle.to_tensor([3.0]) y = paddle.to_tensor([2.0]) x + y, x * y, x / y, x**y x = paddle.arange(4) A = paddle.reshape(paddle.arange(20), (5, 4)) paddle.transpose(A, perm=[1, 0]) B = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) B == paddle.transpose(B, perm=[1, 0]) X = paddle.reshape(paddle.arange(24), (2, 3, 4)) A = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4)) B = A.clone() A, A + B a = 2 X = paddle.reshape(paddle.arange(24), (2, 3, 4)) a + X, (a * X).shape x = paddle.arange(4, dtype=paddle.float32) print(x, x.sum()) A.shape, A.sum() A_sum_axis0 = A.sum(axis=0) A_sum_axis1 = A.sum(axis=1) A.sum(axis=[0, 1]) A.mean(), A.sum() / A.numel() A.mean(axis=0), A.sum(axis=0) / A.shape[0] sum_A = paddle.sum(A, axis=1, keepdim=True) A.cumsum(axis=0) y = paddle.ones(shape=[4], dtype='float32') x, y, paddle.dot(x, y) paddle.sum(x * y) A.shape, x.shape, paddle.mv(A, x) B = paddle.ones(shape=[4, 3], dtype='float32') paddle.mm(A, B) u = paddle.to_tensor([3.0, -4.0]) paddle.norm(u) paddle.abs(u).sum() paddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))
4
%matplotlib inline import numpy as np from matplotlib_inline import backend_inline from d2l import tensorflow as d2l def f(x): return 3 * x ** 2 - 4 * x def numerical_lim(f, x, h): return (f(x + h) - f(x)) / h h = 0.1 for i in range(5): print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}') h *= 0.1
null
null
%matplotlib inline import numpy as np from matplotlib_inline import backend_inline from d2l import paddle as d2l def f(x): return 3 * x ** 2 - 4 * x def numerical_lim(f, x, h): return (f(x + h) - f(x)) / h h = 0.1 for i in range(5): print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}') h *= 0.1
5
import tensorflow as tf x = tf.range(4, dtype=tf.float32) x = tf.Variable(x) with tf.GradientTape() as t: y = 2 * tf.tensordot(x, x, axes=1) x_grad = t.gradient(y, x) x_grad x_grad == 4 * x with tf.GradientTape() as t: y = tf.reduce_sum(x) t.gradient(y, x) with tf.GradientTape() as t: y = x * x t.gradient(y, x) with tf.GradientTape(persistent=True) as t: y = x * x u = tf.stop_gradient(y) z = u * x x_grad = t.gradient(z, x) x_grad == u t.gradient(y, x) == 2 * x def f(a): b = a * 2 while tf.norm(b) < 1000: b = b * 2 if tf.reduce_sum(b) > 0: c = b else: c = 100 * b return c a = tf.Variable(tf.random.normal(shape=())) with tf.GradientTape() as t: d = f(a) d_grad = t.gradient(d, a) d_grad d_grad == d / a
null
null
import warnings warnings.filterwarnings(action='ignore') import paddle x = paddle.arange(4, dtype='float32') x = paddle.to_tensor(x, stop_gradient=False) y = 2 * paddle.dot(x, x) y.backward() x.grad x.grad == 4 * x x.clear_gradient() y = paddle.sum(x) y.backward() x.grad x.clear_gradient() y = x * x paddle.sum(y).backward() x.grad x.clear_gradient() y = x * x u = y.detach() z = u * x paddle.sum(z).backward() x.grad == u x.clear_gradient() paddle.sum(y).backward() x.grad == 2 * x def f(a): b = a * 2 while paddle.norm(b) < 1000: b = b * 2 if paddle.sum(b) > 0: c = b else: c = 100 * b return c a = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False) d = f(a) d.backward() a.grad == d / a
6
%matplotlib inline import numpy as np import tensorflow as tf import tensorflow_probability as tfp from d2l import tensorflow as d2l fair_probs = tf.ones(6) / 6 tfp.distributions.Multinomial(1, fair_probs).sample() tfp.distributions.Multinomial(10, fair_probs).sample() counts = tfp.distributions.Multinomial(1000, fair_probs).sample()
null
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import random import numpy as np import paddle fair_probs = [1.0 / 6] * 6 paddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample() counts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample() counts / 1000 counts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample() counts / 1000
7
counts = tfp.distributions.Multinomial(10, fair_probs).sample(500) cum_counts = tf.cumsum(counts, axis=0) estimates = cum_counts / tf.reduce_sum(cum_counts, axis=1, keepdims=True) d2l.set_figsize((6, 4.5)) for i in range(6): d2l.plt.plot(estimates[:, i].numpy(), label=("P(die=" + str(i + 1) + ")")) d2l.plt.axhline(y=0.167, color='black', linestyle='dashed') d2l.plt.gca().set_xlabel('Groups of experiments') d2l.plt.gca().set_ylabel('Estimated probability') d2l.plt.legend(); import tensorflow as tf a = dir(tf.random) help(tf.ones) tf.ones(4)
null
null
counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1)) cum_counts = counts.cumsum(axis=0) cum_counts = cum_counts.squeeze(axis=1) estimates = cum_counts / cum_counts.sum(axis=1, keepdim=True) d2l.set_figsize((6, 4.5)) for i in range(6): d2l.plt.plot(estimates[:, i], label=("P(die=" + str(i + 1) + ")")) d2l.plt.axhline(y=0.167, color='black', linestyle='dashed') d2l.plt.gca().set_xlabel('Groups of experiments') d2l.plt.gca().set_ylabel('Estimated probability') d2l.plt.legend() import warnings warnings.filterwarnings(action='ignore') import paddle help(paddle.ones) paddle.ones([4], dtype='float32')
8
%matplotlib inline import math import time import numpy as np import tensorflow as tf from d2l import tensorflow as d2l n = 10000 a = tf.ones(n) b = tf.ones(n) c = tf.Variable(tf.zeros(n)) timer = Timer() for i in range(n): c[i].assign(a[i] + b[i]) x = np.arange(-7, 7, 0.01) params = [(0, 1), (0, 2), (3, 1)] d2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])
null
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import math import time import numpy as np import paddle n = 10000 a = paddle.ones([n]) b = paddle.ones([n]) c = paddle.zeros([n]) timer = Timer() for i in range(n): c[i] = a[i] + b[i] x = np.arange(-7, 7, 0.01) params = [(0, 1), (0, 2), (3, 1)] d2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])
9
%matplotlib inline import random import tensorflow as tf from d2l import tensorflow as d2l def synthetic_data(w, b, num_examples): X = tf.zeros((num_examples, w.shape[0])) X += tf.random.normal(shape=X.shape) y = tf.matmul(X, tf.reshape(w, (-1, 1))) + b y += tf.random.normal(shape=y.shape, stddev=0.01) y = tf.reshape(y, (-1, 1)) return X, y true_w = tf.constant([2, -3.4]) true_b = 4.2 features, labels = synthetic_data(true_w, true_b, 1000) d2l.set_figsize() d2l.plt.scatter(features[:, (1)].numpy(), labels.numpy(), 1); def data_iter(batch_size, features, labels): num_examples = len(features) indices = list(range(num_examples)) random.shuffle(indices) for i in range(0, num_examples, batch_size): j = tf.constant(indices[i: min(i + batch_size, num_examples)]) yield tf.gather(features, j), tf.gather(labels, j) batch_size = 10 for X, y in data_iter(batch_size, features, labels): print(X, ' ', y) break w = tf.Variable(tf.random.normal(shape=(2, 1), mean=0, stddev=0.01), trainable=True) b = tf.Variable(tf.zeros(1), trainable=True) def linreg(X, w, b): return tf.matmul(X, w) + b def squared_loss(y_hat, y): return (y_hat - tf.reshape(y, y_hat.shape)) ** 2 / 2 def sgd(params, grads, lr, batch_size): for param, grad in zip(params, grads): param.assign_sub(lr*grad/batch_size) lr = 0.03 num_epochs = 3 net = linreg loss = squared_loss for epoch in range(num_epochs): for X, y in data_iter(batch_size, features, labels): with tf.GradientTape() as g: l = loss(net(X, w, b), y) dw, db = g.gradient(l, [w, b]) sgd([w, b], [dw, db], lr, batch_size) train_l = loss(net(features, w, b), labels)
null
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import random import paddle def synthetic_data(w, b, num_examples): X = paddle.normal(0, 1, (num_examples, len(w))) y = paddle.matmul(X, w) + b y += paddle.normal(0, 0.01, y.shape) return X, y.reshape((-1, 1)) true_w = paddle.to_tensor([2, -3.4]) true_b = 4.2 features, labels = synthetic_data(true_w, true_b, 1000) d2l.set_figsize() d2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1); def data_iter(batch_size, features, labels): num_examples = len(features) indices = list(range(num_examples)) random.shuffle(indices) for i in range(0, num_examples, batch_size): batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)]) yield features[batch_indices], labels[batch_indices] batch_size = 10 for X, y in data_iter(batch_size, features, labels): break w = paddle.normal(0, 0.01, shape=(2,1)) b = paddle.zeros(shape=[1]) w.stop_gradient = False b.stop_gradient = False def linreg(X, w, b): return paddle.matmul(X, w) + b def squared_loss(y_hat, y): return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2 def sgd(params, lr, batch_size): with paddle.no_grad(): for i, param in enumerate(params): param -= lr * params[i].grad / batch_size params[i].set_value(param) params[i].clear_gradient() lr = 0.03 num_epochs = 3 net = linreg loss = squared_loss for epoch in range(num_epochs): for X, y in data_iter(batch_size, features, labels): l = loss(net(X, w, b), y) l.sum().backward() sgd([w, b], lr, batch_size) with paddle.no_grad(): train_l = loss(net(features, w, b), labels)
10
import numpy as np import tensorflow as tf from d2l import tensorflow as d2l true_w = tf.constant([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) def load_array(data_arrays, batch_size, is_train=True): dataset = tf.data.Dataset.from_tensor_slices(data_arrays) if is_train: dataset = dataset.shuffle(buffer_size=1000) dataset = dataset.batch(batch_size) return dataset batch_size = 10 data_iter = load_array((features, labels), batch_size) net = tf.keras.Sequential() net.add(tf.keras.layers.Dense(1)) initializer = tf.initializers.RandomNormal(stddev=0.01) net = tf.keras.Sequential() net.add(tf.keras.layers.Dense(1, kernel_initializer=initializer)) loss = tf.keras.losses.MeanSquaredError() trainer = tf.keras.optimizers.SGD(learning_rate=0.03) w = net.get_weights()[0] b = net.get_weights()[1]
null
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import numpy as np import paddle true_w = paddle.to_tensor([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) def load_array(data_arrays, batch_size, is_train=True): dataset = paddle.io.TensorDataset(data_arrays) return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True) batch_size = 10 data_iter = load_array((features, labels), batch_size) from paddle import nn net = nn.Sequential(nn.Linear(2, 1)) weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01)) bias_attr = paddle.ParamAttr(initializer=None) net = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr)) loss = nn.MSELoss() trainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters()) w = net[0].weight b = net[0].bias
11
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l d2l.use_svg_display() mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data() len(mnist_train[0]), len(mnist_test[0]) def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): figsize = (num_cols * scale, num_rows * scale) _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize) axes = axes.flatten() for i, (ax, img) in enumerate(zip(axes, imgs)): ax.imshow(img.numpy()) ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) if titles: ax.set_title(titles[i]) return axes X = tf.constant(mnist_train[0][:18]) y = tf.constant(mnist_train[1][:18]) show_images(X, 2, 9, titles=get_fashion_mnist_labels(y)); batch_size = 256 train_iter = tf.data.Dataset.from_tensor_slices(mnist_train).batch(batch_size).shuffle(len(mnist_train[0])) def load_data_fashion_mnist(batch_size, resize=None): mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data() process = lambda X, y: (tf.expand_dims(X, axis=3) / 255, tf.cast(y, dtype='int32')) resize_fn = lambda X, y: (tf.image.resize_with_pad(X, resize, resize) if resize else X, y) return (tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(batch_size).shuffle(len(mnist_train[0])).map(resize_fn), tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(batch_size).map(resize_fn))
null
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import sys import paddle from paddle.vision import transforms d2l.use_svg_display() trans = transforms.ToTensor() mnist_train = paddle.vision.datasets.FashionMNIST(mode="train", transform=trans) mnist_test = paddle.vision.datasets.FashionMNIST(mode="test", transform=trans) len(mnist_train), len(mnist_test) def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): figsize = (num_cols * scale, num_rows * scale) _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize) axes = axes.flatten() for i, (ax, img) in enumerate(zip(axes, imgs)): if paddle.is_tensor(img): ax.imshow(img.numpy()) else: ax.imshow(img) ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) if titles: ax.set_title(titles[i]) return axes X, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18))) show_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y)); batch_size = 256 def get_dataloader_workers(): return 4 train_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()) def load_data_fashion_mnist(batch_size, resize=None): trans = [transforms.ToTensor()] if resize: trans.insert(0, transforms.Resize(resize)) trans = transforms.Compose(trans) mnist_train = paddle.vision.datasets.FashionMNIST(mode="train", transform=trans) mnist_test = paddle.vision.datasets.FashionMNIST(mode="test", transform=trans) return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()), paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers()))
12
import tensorflow as tf from IPython import display from d2l import tensorflow as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs), mean=0, stddev=0.01)) b = tf.Variable(tf.zeros(num_outputs)) X = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) tf.reduce_sum(X, 0, keepdims=True), tf.reduce_sum(X, 1, keepdims=True) def softmax(X): X_exp = tf.exp(X) partition = tf.reduce_sum(X_exp, 1, keepdims=True) return X_exp / partition X = tf.random.normal((2, 5), 0, 1) X_prob = softmax(X) X_prob, tf.reduce_sum(X_prob, 1) def net(X): return softmax(tf.matmul(tf.reshape(X, (-1, W.shape[0])), W) + b) y_hat = tf.constant([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]]) y = tf.constant([0, 2]) tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1])) def cross_entropy(y_hat, y): return -tf.math.log(tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1]))) cross_entropy(y_hat, y) def accuracy(y_hat, y): if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: y_hat = tf.argmax(y_hat, axis=1) cmp = tf.cast(y_hat, y.dtype) == y return float(tf.reduce_sum(tf.cast(cmp, y.dtype))) def evaluate_accuracy(net, data_iter): metric = Accumulator(2) for X, y in data_iter: metric.add(accuracy(net(X), y), d2l.size(y)) return metric[0] / metric[1] def train_epoch_ch3(net, train_iter, loss, updater): metric = Accumulator(3) for X, y in train_iter: with tf.GradientTape() as tape: y_hat = net(X) if isinstance(loss, tf.keras.losses.Loss): l = loss(y, y_hat) else: l = loss(y_hat, y) if isinstance(updater, tf.keras.optimizers.Optimizer): params = net.trainable_variables grads = tape.gradient(l, params) updater.apply_gradients(zip(grads, params)) else: updater(X.shape[0], tape.gradient(l, updater.params)) l_sum = l * float(tf.size(y)) if isinstance(loss, tf.keras.losses.Loss) else tf.reduce_sum(l) metric.add(l_sum, accuracy(y_hat, y), tf.size(y)) return metric[0] / metric[2], metric[1] / metric[2] class Updater(): def __init__(self, params, lr): self.params = params self.lr = lr def __call__(self, batch_size, grads): d2l.sgd(self.params, grads, self.lr, batch_size) updater = Updater([W, b], lr=0.1) def predict_ch3(net, test_iter, n=6): for X, y in test_iter: break trues = d2l.get_fashion_mnist_labels(y) preds = d2l.get_fashion_mnist_labels(tf.argmax(net(X), axis=1)) titles = [true +'\n' + pred for true, pred in zip(trues, preds)] d2l.show_images(tf.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n]) predict_ch3(net, test_iter)
null
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from IPython import display batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs)) b = paddle.zeros(shape=(num_outputs,)) W.stop_gradient=False b.stop_gradient=False X = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) X.sum(0, keepdim=True), X.sum(1, keepdim=True) def softmax(X): X_exp = paddle.exp(X) partition = X_exp.sum(1, keepdim=True) return X_exp / partition X = paddle.normal(0, 1, (2, 5)) X_prob = softmax(X) X_prob, X_prob.sum(1) def net(X): return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b) y = paddle.to_tensor([0, 2]) y_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]]) y_hat[[0, 1], y] def cross_entropy(y_hat, y): return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()]) cross_entropy(y_hat, y) def accuracy(y_hat, y): if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: y_hat = y_hat.argmax(axis=1) if len(y_hat.shape) < len(y.shape): cmp = y_hat.astype(y.dtype) == y.squeeze() else: cmp = y_hat.astype(y.dtype) == y return float(cmp.astype(y.dtype).sum()) def evaluate_accuracy(net, data_iter): if isinstance(net, paddle.nn.Layer): net.eval() metric = Accumulator(2) with paddle.no_grad(): for X, y in data_iter: metric.add(accuracy(net(X), y), y.numel()) return metric[0] / metric[1] def train_epoch_ch3(net, train_iter, loss, updater): if isinstance(net, paddle.nn.Layer): net.train() metric = Accumulator(3) for X, y in train_iter: y_hat = net(X) l = loss(y_hat, y) if isinstance(updater, paddle.optimizer.Optimizer): updater.clear_grad() l.mean().backward() updater.step() else: l.sum().backward() updater(X.shape[0]) metric.add(float(l.sum()), accuracy(y_hat, y), y.numel()) return metric[0] / metric[2], metric[1] / metric[2] lr = 0.1 def updater(batch_size): return d2l.sgd([W, b], lr, batch_size) def predict_ch3(net, test_iter, n=6): for X, y in test_iter: break trues = d2l.get_fashion_mnist_labels(y) preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1)) titles = [true +'\n' + pred for true, pred in zip(trues, preds)] d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n]) predict_ch3(net, test_iter)
13
import tensorflow as tf from d2l import tensorflow as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = tf.keras.models.Sequential() net.add(tf.keras.layers.Flatten(input_shape=(28, 28))) weight_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01) net.add(tf.keras.layers.Dense(10, kernel_initializer=weight_initializer)) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) trainer = tf.keras.optimizers.SGD(learning_rate=.1)
null
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10)) def init_weights(m): if type(m) == nn.Linear: nn.initializer.Normal(m.weight, std=0.01) net.apply(init_weights); loss = nn.CrossEntropyLoss(reduction='none') trainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())
14
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l x = tf.Variable(tf.range(-8.0, 8.0, 0.1), dtype=tf.float32) y = tf.nn.relu(x) d2l.plot(x.numpy(), y.numpy(), 'x', 'relu(x)', figsize=(5, 2.5)) with tf.GradientTape() as t: y = tf.nn.relu(x) d2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of relu', figsize=(5, 2.5)) y = tf.nn.sigmoid(x) d2l.plot(x.numpy(), y.numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5)) with tf.GradientTape() as t: y = tf.nn.sigmoid(x) d2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5)) y = tf.nn.tanh(x) d2l.plot(x.numpy(), y.numpy(), 'x', 'tanh(x)', figsize=(5, 2.5)) with tf.GradientTape() as t: y = tf.nn.tanh(x) d2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))
null
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle x = paddle.arange(-8.0, 8.0, 0.1, dtype='float32') x.stop_gradient = False y = paddle.nn.functional.relu(x) d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5)) y.backward(paddle.ones_like(x), retain_graph=True) d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5)) y = paddle.nn.functional.sigmoid(x) d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5)) x.clear_gradient() y.backward(paddle.ones_like(x), retain_graph=True) d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5)) y = paddle.tanh(x) d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5)) x.clear_gradient() y.backward(paddle.ones_like(x), retain_graph=True) d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))
15
import tensorflow as tf from d2l import tensorflow as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs, num_outputs, num_hiddens = 784, 10, 256 W1 = tf.Variable(tf.random.normal(shape=(num_inputs, num_hiddens), mean=0, stddev=0.01)) b1 = tf.Variable(tf.zeros(num_hiddens)) W2 = tf.Variable(tf.random.normal(shape=(num_hiddens, num_outputs), mean=0, stddev=0.01)) b2 = tf.Variable(tf.zeros(num_outputs)) params = [W1, b1, W2, b2] def relu(X): return tf.math.maximum(X, 0) def net(X): X = tf.reshape(X, (-1, num_inputs)) H = relu(tf.matmul(X, W1) + b1) return tf.matmul(H, W2) + b2 def loss(y_hat, y): return tf.losses.sparse_categorical_crossentropy(y, y_hat, from_logits=True) num_epochs, lr = 10, 0.1 updater = d2l.Updater([W1, W2, b1, b2], lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
null
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs, num_outputs, num_hiddens = 784, 10, 256 W1 = paddle.randn([num_inputs, num_hiddens]) * 0.01 W1.stop_gradient = False b1 = paddle.zeros([num_hiddens]) b1.stop_gradient = False W2 = paddle.randn([num_hiddens, num_outputs]) * 0.01 W2.stop_gradient = False b2 = paddle.zeros([num_outputs]) b2.stop_gradient = False params = [W1, b1, W2, b2] def relu(X): a = paddle.zeros_like(X) return paddle.maximum(X, a) def net(X): X = X.reshape((-1, num_inputs)) H = relu(X@W1 + b1) return (H@W2 + b2) loss = nn.CrossEntropyLoss(reduction='none') num_epochs, lr = 10, 0.1 updater = paddle.optimizer.SGD(learning_rate=lr, parameters=params) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
16
import tensorflow as tf from d2l import tensorflow as d2l net = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(10)]) batch_size, lr, num_epochs = 256, 0.1, 10 loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) trainer = tf.keras.optimizers.SGD(learning_rate=lr) train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
null
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10)) for layer in net: if type(layer) == nn.Linear: weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.01)) layer.weight_attr = weight_attr batch_size, lr, num_epochs = 256, 0.1, 10 loss = nn.CrossEntropyLoss(reduction='none') trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr) train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
17
import math import numpy as np import tensorflow as tf from d2l import tensorflow as d2l true_w, features, poly_features, labels = [tf.constant(x, dtype=tf.float32) for x in [true_w, features, poly_features, labels]] features[:2], poly_features[:2, :], labels[:2] def evaluate_loss(net, data_iter, loss): metric = d2l.Accumulator(2) for X, y in data_iter: l = loss(net(X), y) metric.add(tf.reduce_sum(l), d2l.size(l)) return metric[0] / metric[1] def train(train_features, test_features, train_labels, test_labels, num_epochs=400): loss = tf.losses.MeanSquaredError() input_shape = train_features.shape[-1] net = tf.keras.Sequential() net.add(tf.keras.layers.Dense(1, use_bias=False)) batch_size = min(10, train_labels.shape[0]) train_iter = d2l.load_array((train_features, train_labels), batch_size) test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False) trainer = tf.keras.optimizers.SGD(learning_rate=.01) animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test']) for epoch in range(num_epochs): d2l.train_epoch_ch3(net, train_iter, loss, trainer) if epoch == 0 or (epoch + 1) % 20 == 0: animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss))) train(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:]) train(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500)
null
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import math import numpy as np import paddle from paddle import nn true_w, features, poly_features, labels = [paddle.to_tensor(x, dtype= paddle.float32) for x in [true_w, features, poly_features, labels]] features[:2], poly_features[:2, :], labels[:2] def evaluate_loss(net, data_iter, loss): metric = d2l.Accumulator(2) for X, y in data_iter: out = net(X) y = y.reshape(out.shape) l = loss(out, y) metric.add(l.sum(), l.numel()) return metric[0] / metric[1] def train(train_features, test_features, train_labels, test_labels, num_epochs=400): loss = nn.MSELoss() input_shape = train_features.shape[-1] net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False)) batch_size = min(10, train_labels.shape[0]) train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size) test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False) trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01) animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test']) for epoch in range(num_epochs): d2l.train_epoch_ch3(net, train_iter, loss, trainer) if epoch == 0 or (epoch + 1) % 20 == 0: animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss))) train(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:]) train(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500)
18
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5 true_w, true_b = tf.ones((num_inputs, 1)) * 0.01, 0.05 train_data = d2l.synthetic_data(true_w, true_b, n_train) train_iter = d2l.load_array(train_data, batch_size) test_data = d2l.synthetic_data(true_w, true_b, n_test) test_iter = d2l.load_array(test_data, batch_size, is_train=False) def init_params(): w = tf.Variable(tf.random.normal(mean=1, shape=(num_inputs, 1))) b = tf.Variable(tf.zeros(shape=(1, ))) return [w, b] def l2_penalty(w): return tf.reduce_sum(tf.pow(w, 2)) / 2 def train(lambd): w, b = init_params() net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss num_epochs, lr = 100, 0.003 animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: with tf.GradientTape() as tape: l = loss(net(X), y) + lambd * l2_penalty(w) grads = tape.gradient(l, [w, b]) d2l.sgd([w, b], grads, lr, batch_size) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) def train_concise(wd): net = tf.keras.models.Sequential() net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(wd))) net.build(input_shape=(1, num_inputs)) w, b = net.trainable_variables loss = tf.keras.losses.MeanSquaredError() num_epochs, lr = 100, 0.003 trainer = tf.keras.optimizers.SGD(learning_rate=lr) animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: with tf.GradientTape() as tape: l = loss(net(X), y) + net.losses grads = tape.gradient(l, net.trainable_variables) trainer.apply_gradients(zip(grads, net.trainable_variables)) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))
null
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5 true_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05 train_data = d2l.synthetic_data(true_w, true_b, n_train) train_iter = d2l.load_array(train_data, batch_size) test_data = d2l.synthetic_data(true_w, true_b, n_test) test_iter = d2l.load_array(test_data, batch_size, is_train=False) def init_params(): w = paddle.normal(0, 1, shape=(num_inputs, 1)) w.stop_gradient = False b = paddle.zeros(shape=[1]) b.stop_gradient = False return [w, b] def l2_penalty(w): return paddle.sum(w.pow(2)) / 2 def train(lambd): w, b = init_params() net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss num_epochs, lr = 100, 0.003 animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter(): l = loss(net(X), y) + lambd * l2_penalty(w) l.sum().backward() d2l.sgd([w, b], lr, batch_size) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) def train_concise(wd): weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0)) bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0)) net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr)) loss = nn.MSELoss() num_epochs, lr = 100, 0.003 trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0) animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: l = loss(net(X), y) l.backward() trainer.step() trainer.clear_grad() if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))
19
import tensorflow as tf from d2l import tensorflow as d2l def dropout_layer(X, dropout): assert 0 <= dropout <= 1 if dropout == 1: return tf.zeros_like(X) if dropout == 0: return X mask = tf.random.uniform(shape=tf.shape(X), minval=0, maxval=1) < 1 - dropout return tf.cast(mask, dtype=tf.float32) * X / (1.0 - dropout) X = tf.reshape(tf.range(16, dtype=tf.float32), (2, 8)) num_outputs, num_hiddens1, num_hiddens2 = 10, 256, 256 dropout1, dropout2 = 0.2, 0.5 class Net(tf.keras.Model): def __init__(self, num_outputs, num_hiddens1, num_hiddens2): super().__init__() self.input_layer = tf.keras.layers.Flatten() self.hidden1 = tf.keras.layers.Dense(num_hiddens1, activation='relu') self.hidden2 = tf.keras.layers.Dense(num_hiddens2, activation='relu') self.output_layer = tf.keras.layers.Dense(num_outputs) def call(self, inputs, training=None): x = self.input_layer(inputs) x = self.hidden1(x) if training: x = dropout_layer(x, dropout1) x = self.hidden2(x) if training: x = dropout_layer(x, dropout2) x = self.output_layer(x) return x net = Net(num_outputs, num_hiddens1, num_hiddens2) num_epochs, lr, batch_size = 10, 0.5, 256 loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) trainer = tf.keras.optimizers.SGD(learning_rate=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation=tf.nn.relu), tf.keras.layers.Dropout(dropout1), tf.keras.layers.Dense(256, activation=tf.nn.relu), tf.keras.layers.Dropout(dropout2), tf.keras.layers.Dense(10), ]) trainer = tf.keras.optimizers.SGD(learning_rate=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
null
null
import warnings warnings.filterwarnings(action='ignore') import random import paddle from paddle import nn warnings.filterwarnings("ignore", category=DeprecationWarning) from d2l import paddle as d2l def dropout_layer(X, dropout): assert 0 <= dropout <= 1 if dropout == 1: return paddle.zeros_like(X) if dropout == 0: return X mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32') return mask * X / (1.0 - dropout) X = paddle.arange(16, dtype=paddle.float32).reshape((2, 8)) num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256 dropout1, dropout2 = 0.2, 0.5 class Net(nn.Layer): def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2, is_training = True): super(Net, self).__init__() self.num_inputs = num_inputs self.training = is_training self.lin1 = nn.Linear(num_inputs, num_hiddens1) self.lin2 = nn.Linear(num_hiddens1, num_hiddens2) self.lin3 = nn.Linear(num_hiddens2, num_outputs) self.relu = nn.ReLU() def forward(self, X): H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs)))) if self.training == True: H1 = dropout_layer(H1, dropout1) H2 = self.relu(self.lin2(H1)) if self.training == True: H2 = dropout_layer(H2, dropout2) out = self.lin3(H2) return out net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2) num_epochs, lr, batch_size = 10, 0.5, 256 loss = nn.CrossEntropyLoss(reduction='none') train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) trainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters()) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01)) net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256, weight_attr=weight_attr), nn.ReLU(), nn.Dropout(dropout1), nn.Linear(256, 256, weight_attr=weight_attr), nn.ReLU(), nn.Dropout(dropout2), nn.Linear(256, 10, weight_attr=weight_attr)) trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters()) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
20
trainer = tf.keras.optimizers.SGD(learning_rate=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) %matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l x = tf.Variable(tf.range(-8.0, 8.0, 0.1)) with tf.GradientTape() as t: y = tf.nn.sigmoid(x) d2l.plot(x.numpy(), [y.numpy(), t.gradient(y, x).numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5)) M = tf.random.normal((4, 4)) for i in range(100): M = tf.matmul(M, tf.random.normal((4, 4)))
null
null
trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters()) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) %matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle x = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32') x.stop_gradient = False y = paddle.nn.functional.sigmoid(x) y.backward(paddle.ones_like(x)) d2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5)) M = paddle.normal(0, 1, shape=(4,4)) for i in range(100): M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4)))
21
%matplotlib inline import numpy as np import pandas as pd import tensorflow as tf from d2l import tensorflow as d2l n_train = train_data.shape[0] train_features = tf.constant(all_features[:n_train].values, dtype=tf.float32) test_features = tf.constant(all_features[n_train:].values, dtype=tf.float32) train_labels = tf.constant(train_data.SalePrice.values.reshape(-1, 1), dtype=tf.float32) loss = tf.keras.losses.MeanSquaredError() def get_net(): net = tf.keras.models.Sequential() net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(weight_decay))) return net def log_rmse(y_true, y_pred): clipped_preds = tf.clip_by_value(y_pred, 1, float('inf')) return tf.sqrt(tf.reduce_mean(loss(tf.math.log(y_true), tf.math.log(clipped_preds)))) def train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size): train_ls, test_ls = [], [] train_iter = d2l.load_array((train_features, train_labels), batch_size) optimizer = tf.keras.optimizers.Adam(learning_rate) net.compile(loss=loss, optimizer=optimizer) for epoch in range(num_epochs): for X, y in train_iter: with tf.GradientTape() as tape: y_hat = net(X) l = loss(y, y_hat) params = net.trainable_variables grads = tape.gradient(l, params) optimizer.apply_gradients(zip(grads, params)) train_ls.append(log_rmse(train_labels, net(train_features))) if test_labels is not None: test_ls.append(log_rmse(test_labels, net(test_features))) return train_ls, test_ls def get_k_fold_data(k, i, X, y): assert k > 1 fold_size = X.shape[0] // k X_train, y_train = None, None for j in range(k): idx = slice(j * fold_size, (j + 1) * fold_size) X_part, y_part = X[idx, :], y[idx] if j == i: X_valid, y_valid = X_part, y_part elif X_train is None: X_train, y_train = X_part, y_part else: X_train = tf.concat([X_train, X_part], 0) y_train = tf.concat([y_train, y_part], 0) return X_train, y_train, X_valid, y_valid def train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size): net = get_net() train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size) d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log') preds = net(test_features).numpy() test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0]) submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1) submission.to_csv('submission.csv', index=False)
null
null
%matplotlib inline import warnings import numpy as np import pandas as pd warnings.filterwarnings(action='ignore') import paddle from paddle import nn warnings.filterwarnings("ignore", category=DeprecationWarning) from d2l import paddle as d2l n_train = train_data.shape[0] train_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32) test_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32) train_labels = paddle.to_tensor( train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32) loss = nn.MSELoss() in_features = train_features.shape[1] def get_net(): net = nn.Sequential(nn.Linear(in_features,1)) return net def log_rmse(net, features, labels): clipped_preds = paddle.clip(net(features), 1, float('inf')) rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels))) return rmse.item() def train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size): train_ls, test_ls = [], [] train_iter = d2l.load_array((train_features, train_labels), batch_size) optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0) for epoch in range(num_epochs): for X, y in train_iter: l = loss(net(X), y) l.backward() optimizer.step() optimizer.clear_grad() train_ls.append(log_rmse(net, train_features, train_labels)) if test_labels is not None: test_ls.append(log_rmse(net, test_features, test_labels)) return train_ls, test_ls def get_k_fold_data(k, i, X, y): assert k > 1 fold_size = X.shape[0] // k X_train, y_train = None, None for j in range(k): idx = slice(j * fold_size, (j + 1) * fold_size) X_part, y_part = X[idx, :], y[idx] if j == i: X_valid, y_valid = X_part, y_part elif X_train is None: X_train, y_train = X_part, y_part else: X_train = paddle.concat([X_train, X_part], 0) y_train = paddle.concat([y_train, y_part], 0) return X_train, y_train, X_valid, y_valid def train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size): net = get_net() train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size) d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log') preds = net(test_features).detach().numpy() test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0]) submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1) submission.to_csv('submission.csv', index=False)
22
import tensorflow as tf net = tf.keras.models.Sequential([ tf.keras.layers.Dense(256, activation=tf.nn.relu), tf.keras.layers.Dense(10)]) X = tf.random.uniform((2, 20)) net(X) class MLP(tf.keras.Model): def __init__(self): super().__init__() self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu) self.out = tf.keras.layers.Dense(units=10) def call(self, X): return self.out(self.hidden((X))) class MySequential(tf.keras.Model): def __init__(self, *args): super().__init__() self.modules = [] for block in args: self.modules.append(block) def call(self, X): for module in self.modules: X = module(X) return X net = MySequential( tf.keras.layers.Dense(units=256, activation=tf.nn.relu), tf.keras.layers.Dense(10)) net(X) class FixedHiddenMLP(tf.keras.Model): def __init__(self): super().__init__() self.flatten = tf.keras.layers.Flatten() self.rand_weight = tf.constant(tf.random.uniform((20, 20))) self.dense = tf.keras.layers.Dense(20, activation=tf.nn.relu) def call(self, inputs): X = self.flatten(inputs) X = tf.nn.relu(tf.matmul(X, self.rand_weight) + 1) X = self.dense(X) while tf.reduce_sum(tf.math.abs(X)) > 1: X /= 2 return tf.reduce_sum(X) class NestMLP(tf.keras.Model): def __init__(self): super().__init__() self.net = tf.keras.Sequential() self.net.add(tf.keras.layers.Dense(64, activation=tf.nn.relu)) self.net.add(tf.keras.layers.Dense(32, activation=tf.nn.relu)) self.dense = tf.keras.layers.Dense(16, activation=tf.nn.relu) def call(self, inputs): return self.dense(self.net(inputs)) chimera = tf.keras.Sequential() chimera.add(NestMLP()) chimera.add(tf.keras.layers.Dense(20)) chimera.add(FixedHiddenMLP()) chimera(X)
null
null
import warnings warnings.filterwarnings(action='ignore') import paddle from paddle import nn from paddle.nn import functional as F net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10)) X = paddle.rand([2, 20]) net(X) class MLP(nn.Layer): def __init__(self): super().__init__() self.hidden = nn.Linear(20, 256) self.out = nn.Linear(256, 10) def forward(self, X): return self.out(F.relu(self.hidden(X))) class MySequential(nn.Layer): def __init__(self, *layers): super(MySequential, self).__init__() if len(layers) > 0 and isinstance(layers[0], tuple): for name, layer in layers: self.add_sublayer(name, layer) else: for idx, layer in enumerate(layers): self.add_sublayer(str(idx), layer) def forward(self, X): for layer in self._sub_layers.values(): X = layer(X) return X net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10)) net(X) class FixedHiddenMLP(nn.Layer): def __init__(self): super().__init__() self.rand_weight = paddle.rand([20, 20]) self.linear = nn.Linear(20, 20) def forward(self, X): X = self.linear(X) X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1) X = self.linear(X) while X.abs().sum() > 1: X /= 2 return X.sum() class NestMLP(nn.Layer): def __init__(self): super().__init__() self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU()) self.linear = nn.Linear(32, 16) def forward(self, X): return self.linear(self.net(X)) chimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP()) chimera(X)
23
import tensorflow as tf net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu), tf.keras.layers.Dense(1), ]) X = tf.random.uniform((2, 4)) net(X) net.get_weights()[1] def block1(name): return tf.keras.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu)], name=name) def block2(): net = tf.keras.Sequential() for i in range(4): net.add(block1(name=f'block-{i}')) return net rgnet = tf.keras.Sequential() rgnet.add(block2()) rgnet.add(tf.keras.layers.Dense(1)) rgnet(X) net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.01), bias_initializer=tf.zeros_initializer()), tf.keras.layers.Dense(1)]) net(X) net.weights[0], net.weights[1] net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.Constant(1), bias_initializer=tf.zeros_initializer()), tf.keras.layers.Dense(1), ]) net(X) net.weights[0], net.weights[1] net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.GlorotUniform()), tf.keras.layers.Dense(1, kernel_initializer=tf.keras.initializers.Constant(1)), ]) net(X) class MyInit(tf.keras.initializers.Initializer): def __call__(self, shape, dtype=None): data=tf.random.uniform(shape, -10, 10, dtype=dtype) factor=(tf.abs(data) >= 5) factor=tf.cast(factor, tf.float32) return data * factor net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=MyInit()), tf.keras.layers.Dense(1)]) net(X) net.layers[1].weights[0][:].assign(net.layers[1].weights[0] + 1) net.layers[1].weights[0][0, 0].assign(42) net.layers[1].weights[0] layer = CenteredLayer() layer(tf.constant([1, 2, 3, 4, 5])) net = tf.keras.Sequential([tf.keras.layers.Dense(128), CenteredLayer()])
null
null
import warnings warnings.filterwarnings(action='ignore') import paddle from paddle import nn net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1)) X = paddle.rand([2, 4]) net(X) net.state_dict()['2.bias'] def block1(): return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU()) def block2(): net = nn.Sequential() for i in range(4): net.add_sublayer(f'block {i}', block1()) return net rgnet = nn.Sequential(block2(), nn.Linear(4, 1)) rgnet(X) def init_normal(m): if type(m) == nn.Linear: paddle.nn.initializer.Normal(mean=0.0, std=0.01) paddle.zeros(m.bias) net.apply(init_normal) net[0].weight[0],net[0].state_dict()['bias'] def init_constant(m): if type(m) == nn.Linear: paddle.nn.initializer.Constant(value = 1) paddle.zeros(m.bias) net.apply(init_constant) net[0].weight[0],net[0].state_dict()['bias'] def xavier(m): if type(m) == nn.Linear: paddle.nn.initializer.XavierUniform(m.weight) def init_42(m): if type(m) == nn.Linear: paddle.nn.initializer.Constant(42) net[0].apply(xavier) net[2].apply(init_42) def my_init(m): if type(m) == nn.Linear: print("Init", *[(name, param.shape) for name, param in m.named_parameters()][0]) paddle.nn.initializer.XavierUniform(m.weight, -10, 10) h = paddle.abs(m.weight) >= 5 h = paddle.to_tensor(h) m = paddle.to_tensor(m.weight) m *= h net.apply(my_init) net[0].weight[:2] net[0].weight.set_value(net[0].weight.numpy() + 1) val = net[0].weight.numpy() val[0, 0] = 42 net[0].weight.set_value(val) net[0].weight[0] layer = CenteredLayer() layer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32')) net = nn.Sequential(nn.Linear(8, 128), CenteredLayer())
24
import tensorflow as tf class CenteredLayer(tf.keras.Model): def __init__(self): super().__init__() def call(self, inputs): return inputs - tf.reduce_mean(inputs) Y = net(tf.random.uniform((4, 8))) tf.reduce_mean(Y) class MyDense(tf.keras.Model): def __init__(self, units): super().__init__() self.units = units def build(self, X_shape): self.weight = self.add_weight(name='weight', shape=[X_shape[-1], self.units], initializer=tf.random_normal_initializer()) self.bias = self.add_weight( name='bias', shape=[self.units], initializer=tf.zeros_initializer()) def call(self, X): linear = tf.matmul(X, self.weight) + self.bias return tf.nn.relu(linear) dense = MyDense(3) dense(tf.random.uniform((2, 5))) dense.get_weights() dense(tf.random.uniform((2, 5))) net = tf.keras.models.Sequential([MyDense(8), MyDense(1)]) net(tf.random.uniform((2, 64)))
null
null
import warnings warnings.filterwarnings(action='ignore') import paddle import paddle.nn.functional as F from paddle import nn class CenteredLayer(nn.Layer): def __init__(self): super().__init__() def forward(self, X): return X - X.mean() Y = net(paddle.rand([4, 8])) Y.mean() class MyLinear(nn.Layer): def __init__(self, in_units, units): super().__init__() self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32') self.bias = paddle.create_parameter(shape=(units,), dtype='float32') def forward(self, X): linear = paddle.matmul(X, self.weight) + self.bias return F.relu(linear) linear = MyLinear(5, 3) linear.weight linear(paddle.randn([2, 5])) net = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1)) net(paddle.rand([2, 64]))
25
import numpy as np import tensorflow as tf x = tf.range(4) np.save('x-file.npy', x) x2 = np.load('x-file.npy', allow_pickle=True) y = tf.zeros(4) np.save('xy-files.npy', [x, y]) x2, y2 = np.load('xy-files.npy', allow_pickle=True) mydict = {'x': x, 'y': y} np.save('mydict.npy', mydict) mydict2 = np.load('mydict.npy', allow_pickle=True) class MLP(tf.keras.Model): def __init__(self): super().__init__() self.flatten = tf.keras.layers.Flatten() self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu) self.out = tf.keras.layers.Dense(units=10) def call(self, inputs): x = self.flatten(inputs) x = self.hidden(x) return self.out(x) net = MLP() X = tf.random.uniform((2, 20)) Y = net(X) net.save_weights('mlp.params') clone = MLP() clone.load_weights('mlp.params')
null
null
import warnings warnings.filterwarnings(action='ignore') import paddle from paddle import nn from paddle.nn import functional as F x = paddle.arange(4) paddle.save(x, 'x-file') x2 = paddle.load('x-file') y = paddle.zeros([4]) paddle.save([x,y], 'x-file') x2, y2 = paddle.load('x-file') mydict = {'x': x, 'y': y} paddle.save(mydict, 'mydict') mydict2 = paddle.load('mydict') class MLP(nn.Layer): def __init__(self): super().__init__() self.hidden = nn.Linear(20, 256) self.output = nn.Linear(256, 10) def forward(self, x): return self.output(F.relu(self.hidden(x))) net = MLP() X = paddle.randn(shape=[2, 20]) Y = net(X) paddle.save(net.state_dict(), 'mlp.pdparams') clone = MLP() clone.set_state_dict(paddle.load('mlp.pdparams')) clone.eval()
26
import tensorflow as tf tf.device('/CPU:0'), tf.device('/GPU:0'), tf.device('/GPU:1') len(tf.config.experimental.list_physical_devices('GPU')) def try_gpu(i=0): if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1: return tf.device(f'/GPU:{i}') return tf.device('/CPU:0') def try_all_gpus(): num_gpus = len(tf.config.experimental.list_physical_devices('GPU')) devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)] return devices if devices else [tf.device('/CPU:0')] try_gpu(), try_gpu(10), try_all_gpus() x = tf.constant([1, 2, 3]) x.device with try_gpu(): X = tf.ones((2, 3)) with try_gpu(1): Y = tf.random.uniform((2, 3)) with try_gpu(1): Z = X with try_gpu(1): Z2 = Z Z2 is Z strategy = tf.distribute.MirroredStrategy() with strategy.scope(): net = tf.keras.models.Sequential([ tf.keras.layers.Dense(1)]) net.layers[0].weights[0].device, net.layers[0].weights[1].device
import paddle from paddle import nn paddle.device.set_device("cpu"), paddle.CUDAPlace(0), paddle.CUDAPlace(1) paddle.device.cuda.device_count() def try_gpu(i=0): if paddle.device.cuda.device_count() >= i + 1: return paddle.CUDAPlace(i) return paddle.CPUPlace() def try_all_gpus(): devices = [paddle.CUDAPlace(i) for i in range(paddle.device.cuda.device_count())] return devices if devices else paddle.CPUPlace() try_gpu(),try_gpu(10),try_all_gpus() x = paddle.to_tensor([1, 2, 3]) x.place X = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu()) Y = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1)) Z = X.cuda(1) Z.cuda(1) is Z net = nn.Sequential(nn.Linear(3, 1)) net=net.to(try_gpu()) net[0].weight.place
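A minimal usage sketch for the device helpers above, assuming try_gpu and try_all_gpus are defined as in that block; it falls back to the CPU when no GPU is visible.

import paddle

device = try_gpu()                       # CUDAPlace(0) if a GPU exists, otherwise CPUPlace
x = paddle.to_tensor([1.0, 2.0, 3.0], place=device)
print(x.place)
print(try_all_gpus())                    # list of visible CUDA places, or a CPUPlace fallback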
import tensorflow as tf from d2l import tensorflow as d2l def corr2d(X, K): h, w = K.shape Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))) for i in range(Y.shape[0]): for j in range(Y.shape[1]): Y[i, j].assign(tf.reduce_sum( X[i: i + h, j: j + w] * K)) return Y X = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) K = tf.constant([[0.0, 1.0], [2.0, 3.0]]) corr2d(X, K) class Conv2D(tf.keras.layers.Layer): def __init__(self): super().__init__() def build(self, kernel_size): initializer = tf.random_normal_initializer() self.weight = self.add_weight(name='w', shape=kernel_size, initializer=initializer) self.bias = self.add_weight(name='b', shape=(1, ), initializer=initializer) def call(self, inputs): return corr2d(inputs, self.weight) + self.bias X = tf.Variable(tf.ones((6, 8))) X[:, 2:6].assign(tf.zeros(X[:, 2:6].shape)) K = tf.constant([[1.0, -1.0]]) corr2d(tf.transpose(X), K) conv2d = tf.keras.layers.Conv2D(1, (1, 2), use_bias=False) X = tf.reshape(X, (1, 6, 8, 1)) Y = tf.reshape(Y, (1, 6, 7, 1)) lr = 3e-2 Y_hat = conv2d(X) for i in range(10): with tf.GradientTape(watch_accessed_variables=False) as g: g.watch(conv2d.weights[0]) Y_hat = conv2d(X) l = (abs(Y_hat - Y)) ** 2 update = tf.multiply(lr, g.gradient(l, conv2d.weights[0])) weights = conv2d.get_weights() weights[0] = conv2d.weights[0] - update conv2d.set_weights(weights) tf.reshape(conv2d.get_weights()[0], (1, 2))
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn def corr2d(X, K): h, w = K.shape Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): Y[i, j] = (X[i:i + h, j:j + w] * K).sum() return Y X = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) K = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]]) corr2d(X, K) class Conv2D(nn.Layer): def __init__(self, kernel_size): super().__init__() self.weight = paddle.ParamAttr(paddle.rand(kernel_size)) self.bias = paddle.ParamAttr(paddle.zeros(1)) def forward(self, x): return corr2d(x, self.weight) + self.bias X = paddle.ones((6, 8)) X[:, 2:6] = 0 K = paddle.to_tensor([[1.0, -1.0]]) Y = corr2d(X, K) corr2d(X.t(), K) conv2d = nn.Conv2D(1, 1, kernel_size=(1, 2)) X = X.reshape((1, 1, 6, 8)) Y = Y.reshape((1, 1, 6, 7)) lr = 3e-2 for i in range(10): Y_hat = conv2d(X) l = (Y_hat - Y) ** 2 conv2d.clear_gradients() l.sum().backward() with paddle.no_grad(): conv2d.weight[:] -= lr * conv2d.weight.grad conv2d.weight.reshape((1, 2))
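The output of corr2d on the 3x3 input and 2x2 kernel above can be verified by hand, e.g. Y[0, 0] = 0*0 + 1*1 + 3*2 + 4*3 = 19. A quick check, assuming the corr2d defined above is in scope:

import paddle

X = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
K = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])
expected = paddle.to_tensor([[19.0, 25.0], [37.0, 43.0]])
print(bool((corr2d(X, K) == expected).all()))  # True if corr2d matches the hand computation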
import tensorflow as tf def comp_conv2d(conv2d, X): X = tf.reshape(X, (1, ) + X.shape + (1, )) Y = conv2d(X) return tf.reshape(Y, Y.shape[1:3]) conv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same') X = tf.random.uniform(shape=(8, 8)) comp_conv2d(conv2d, X).shape conv2d = tf.keras.layers.Conv2D(1, kernel_size=(5, 3), padding='same') comp_conv2d(conv2d, X).shape conv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same', strides=2) comp_conv2d(conv2d, X).shape conv2d = tf.keras.layers.Conv2D(1, kernel_size=(3,5), padding='valid', strides=(3, 4)) comp_conv2d(conv2d, X).shape
import warnings warnings.filterwarnings("ignore") import paddle from paddle import nn def comp_conv2d(conv2d, X): X = paddle.reshape(X, [1, 1] + X.shape) Y = conv2d(X) return Y.reshape(Y.shape[2:]) conv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1) X = paddle.rand((8, 8)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, stride=2) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4)) comp_conv2d(conv2d, X).shape
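The shapes printed above follow the usual convolution output-size rule: with input length n, kernel size k, total padding p (both sides combined) and stride s, the output length is floor((n - k + p + s) / s). A tiny helper, shown here purely as an illustration rather than anything the block defines:

def conv_out_len(n, k, p, s):
    # n: input size, k: kernel size, p: total padding, s: stride
    return (n - k + p + s) // s

print(conv_out_len(8, 3, 2, 2))                             # 4, the stride-2 example above
print(conv_out_len(8, 3, 0, 3), conv_out_len(8, 5, 2, 4))   # 2 2, the last example above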
import tensorflow as tf from d2l import tensorflow as d2l def corr2d_multi_in(X, K): return tf.reduce_sum([d2l.corr2d(x, k) for x, k in zip(X, K)], axis=0) X = tf.constant([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]) K = tf.constant([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]]) corr2d_multi_in(X, K) def corr2d_multi_in_out(X, K): return tf.stack([corr2d_multi_in(X, k) for k in K], 0) K = tf.stack((K, K + 1, K + 2), 0) K.shape def corr2d_multi_in_out_1x1(X, K): c_i, h, w = X.shape c_o = K.shape[0] X = tf.reshape(X, (c_i, h * w)) K = tf.reshape(K, (c_o, c_i)) Y = tf.matmul(K, X) return tf.reshape(Y, (c_o, h, w)) X = tf.random.normal((3, 3, 3), 0, 1) K = tf.random.normal((2, 3, 1, 1), 0, 1) Y1 = corr2d_multi_in_out_1x1(X, K) Y2 = corr2d_multi_in_out(X, K) assert float(tf.reduce_sum(tf.abs(Y1 - Y2))) < 1e-6
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle def corr2d_multi_in(X, K): return sum(d2l.corr2d(x, k) for x, k in zip(X, K)) X = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]) K = paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]]) corr2d_multi_in(X, K) def corr2d_multi_in_out(X, K): return paddle.stack([corr2d_multi_in(X, k) for k in K], 0) K = paddle.stack((K, K + 1, K + 2), 0) K.shape def corr2d_multi_in_out_1x1(X, K): c_i, h, w = X.shape c_o = K.shape[0] X = X.reshape((c_i, h * w)) K = K.reshape((c_o, c_i)) Y = paddle.matmul(K, X) return Y.reshape((c_o, h, w)) X = paddle.normal(0, 1, (3, 3, 3)) K = paddle.normal(0, 1, (2, 3, 1, 1)) Y1 = corr2d_multi_in_out_1x1(X, K) Y2 = corr2d_multi_in_out(X, K) assert float(paddle.abs(Y1 - Y2).sum()) < 1e-6
import tensorflow as tf def pool2d(X, pool_size, mode='max'): p_h, p_w = pool_size Y = tf.Variable(tf.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w +1))) for i in range(Y.shape[0]): for j in range(Y.shape[1]): if mode == 'max': Y[i, j].assign(tf.reduce_max(X[i: i + p_h, j: j + p_w])) elif mode =='avg': Y[i, j].assign(tf.reduce_mean(X[i: i + p_h, j: j + p_w])) return Y X = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) pool2d(X, (2, 2)) X = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1)) pool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3]) pool2d(X) paddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]]) X_padded = tf.pad(X, paddings, "CONSTANT") pool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid', strides=2) pool2d(X_padded) paddings = tf.constant([[0, 0], [0, 0], [1, 1], [0, 0]]) X_padded = tf.pad(X, paddings, "CONSTANT") pool2d = tf.keras.layers.MaxPool2D(pool_size=[2, 3], padding='valid', strides=(2, 3)) pool2d(X_padded) X = tf.concat([X, X + 1], 3) paddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]]) X_padded = tf.pad(X, paddings, "CONSTANT") pool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid', strides=2) pool2d(X_padded)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn def pool2d(X, pool_size, mode='max'): p_h, p_w = pool_size Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): if mode == 'max': Y[i, j] = X[i: i + p_h, j: j + p_w].max() elif mode == 'avg': Y[i, j] = X[i: i + p_h, j: j + p_w].mean() return Y X = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) pool2d(X, (2, 2)) X = paddle.arange(16, dtype="float32").reshape((1, 1, 4, 4)) pool2d = nn.MaxPool2D(3, stride=3) pool2d(X) pool2d = nn.MaxPool2D(3, padding=1, stride=2) pool2d(X) pool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3)) pool2d(X) X = paddle.concat((X, X + 1), 1) pool2d = paddle.nn.MaxPool2D(3, padding=1, stride=2) pool2d(X)
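For the 3x3 example above, the 2x2 pooling windows can be evaluated by hand: max pooling yields [[4, 5], [7, 8]] and average pooling yields [[2, 3], [5, 6]]. A short check, assuming the scratch pool2d defined above:

import paddle

X = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
print(pool2d(X, (2, 2)))         # [[4., 5.], [7., 8.]]
print(pool2d(X, (2, 2), 'avg'))  # [[2., 3.], [5., 6.]]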
import tensorflow as tf from d2l import tensorflow as d2l def net(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(filters=6, kernel_size=5, activation='sigmoid', padding='same'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Conv2D(filters=16, kernel_size=5, activation='sigmoid'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(120, activation='sigmoid'), tf.keras.layers.Dense(84, activation='sigmoid'), tf.keras.layers.Dense(10)]) X = tf.random.uniform((1, 28, 28, 1)) for layer in net().layers: X = layer(X) print(layer.__class__.__name__, 'output shape: ', X.shape) class TrainCallback(tf.keras.callbacks.Callback): def __init__(self, net, train_iter, test_iter, num_epochs, device_name): self.timer = d2l.Timer() self.animator = d2l.Animator( xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc']) self.net = net self.train_iter = train_iter self.test_iter = test_iter self.num_epochs = num_epochs self.device_name = device_name def on_epoch_begin(self, epoch, logs=None): self.timer.start() def on_epoch_end(self, epoch, logs): self.timer.stop() test_acc = self.net.evaluate(self.test_iter, verbose=0, return_dict=True)['accuracy'] metrics = (logs['loss'], logs['accuracy'], test_acc) self.animator.add(epoch + 1, metrics) if epoch == self.num_epochs - 1: batch_size = next(iter(self.train_iter))[0].shape[0] num_examples = batch_size * tf.data.experimental.cardinality(self.train_iter).numpy() def train_ch6(net_fn, train_iter, test_iter, num_epochs, lr, device): device_name = device._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) with strategy.scope(): optimizer = tf.keras.optimizers.SGD(learning_rate=lr) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) net = net_fn() net.compile(optimizer=optimizer, loss=loss, metrics=['accuracy']) callback = TrainCallback(net, train_iter, test_iter, num_epochs, device_name) net.fit(train_iter, epochs=num_epochs, verbose=0, callbacks=[callback]) return net
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn, optimizer net = nn.Sequential( nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(), nn.AvgPool2D(kernel_size=2, stride=2), nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(), nn.AvgPool2D(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(), nn.Linear(120, 84), nn.Sigmoid(), nn.Linear(84, 10)) X = paddle.rand((1, 1, 28, 28), 'float32') for layer in net: X = layer(X) print(layer.__class__.__name__, 'output shape: ', X.shape) def train_ch6(net, train_iter, test_iter, num_epochs, lr, device): def init_weights(m): if type(m) == nn.Linear or type(m) == nn.Conv2D: nn.initializer.XavierUniform(m.weight) net.apply(init_weights) net.to(device) optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters()) loss = nn.CrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc']) timer, num_batches = d2l.Timer(), len(train_iter) for epoch in range(num_epochs): metric = d2l.Accumulator(3) net.train() for i, (X, y) in enumerate(train_iter): timer.start() optimizer.clear_grad() X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device) y_hat = net(X) l = loss(y_hat, y) l.backward() optimizer.step() with paddle.no_grad(): metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0]) timer.stop() train_l = metric[0] / metric[2] train_acc = metric[1] / metric[2] if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1: animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None)) test_acc = evaluate_accuracy_gpu(net, test_iter) animator.add(epoch + 1, (None, None, test_acc))
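A hypothetical invocation of the train_ch6 defined above, with illustrative hyperparameters; it assumes d2l.load_data_fashion_mnist is available (as elsewhere in this document) and that evaluate_accuracy_gpu, which train_ch6 calls, is defined in scope.

batch_size, lr, num_epochs = 256, 0.9, 10          # illustrative values
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size=batch_size)
train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())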
import tensorflow as tf from d2l import tensorflow as d2l def net(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(filters=96, kernel_size=11, strides=4, activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), tf.keras.layers.Conv2D(filters=256, kernel_size=5, padding='same', activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'), tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'), tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(4096, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(4096, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(10)]) X = tf.random.uniform((1, 224, 224, 1)) for layer in net().layers: X = layer(X) print(layer.__class__.__name__, 'output shape: ', X.shape)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn net = nn.Sequential( nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2), nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2), nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(), nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, 10)) X = paddle.randn(shape=(1, 1, 224, 224)) for layer in net: X=layer(X) print(layer.__class__.__name__,'output shape: ',X.shape)
import tensorflow as tf from d2l import tensorflow as d2l def vgg_block(num_convs, num_channels): blk = tf.keras.models.Sequential() for _ in range(num_convs): blk.add(tf.keras.layers.Conv2D(num_channels,kernel_size=3, padding='same',activation='relu')) blk.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2)) return blk def vgg(conv_arch): net = tf.keras.models.Sequential() for (num_convs, num_channels) in conv_arch: net.add(vgg_block(num_convs, num_channels)) net.add(tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4096, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(4096, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(10)])) return net net = vgg(conv_arch) X = tf.random.uniform((1, 224, 224, 1)) for blk in net.layers: X = blk(X) print(blk.__class__.__name__,'output shape: ', X.shape) ratio = 4 small_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch] net = lambda: vgg(small_conv_arch)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def vgg_block(num_convs, in_channels, out_channels): layers = [] for _ in range(num_convs): layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1)) layers.append(nn.ReLU()) in_channels = out_channels layers.append(nn.MaxPool2D(kernel_size=2, stride=2)) return nn.Sequential(*layers) def vgg(conv_arch): conv_blks = [] in_channels = 1 for (num_convs, out_channels) in conv_arch: conv_blks.append(vgg_block(num_convs, in_channels, out_channels)) in_channels = out_channels return nn.Sequential(*conv_blks, nn.Flatten(), nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5), nn.Linear(4096, 10)) net = vgg(conv_arch) X = paddle.randn(shape=(1, 1, 224, 224)) for blk in net: X = blk(X) print(blk.__class__.__name__,'output shape: ',X.shape) ratio = 4 small_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch] net = vgg(small_conv_arch)
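Both VGG blocks above reference conv_arch without defining it. In the VGG-11 setup this is typically the (num_convs, out_channels) sequence below; it is stated here as an assumption so the snippets above become runnable.

# assumed architecture: 8 conv layers in 5 blocks, followed by the 3 dense layers
conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))
net = vgg(conv_arch)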
import tensorflow as tf from d2l import tensorflow as d2l def nin_block(num_channels, kernel_size, strides, padding): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(num_channels, kernel_size, strides=strides, padding=padding, activation='relu'), tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu'), tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu')]) def net(): return tf.keras.models.Sequential([ nin_block(96, kernel_size=11, strides=4, padding='valid'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), nin_block(256, kernel_size=5, strides=1, padding='same'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), nin_block(384, kernel_size=3, strides=1, padding='same'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), tf.keras.layers.Dropout(0.5), nin_block(10, kernel_size=3, strides=1, padding='same'), tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Reshape((1, 1, 10)), tf.keras.layers.Flatten(), ]) X = tf.random.uniform((1, 224, 224, 1)) for layer in net().layers: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def nin_block(in_channels, out_channels, kernel_size, strides, padding): return nn.Sequential( nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding), nn.ReLU(), nn.Conv2D(out_channels, out_channels, kernel_size=1), nn.ReLU(), nn.Conv2D(out_channels, out_channels, kernel_size=1), nn.ReLU()) net = nn.Sequential( nin_block(1, 96, kernel_size=11, strides=4, padding=0), nn.MaxPool2D(3, stride=2), nin_block(96, 256, kernel_size=5, strides=1, padding=2), nn.MaxPool2D(3, stride=2), nin_block(256, 384, kernel_size=3, strides=1, padding=1), nn.MaxPool2D(3, stride=2), nn.Dropout(0.5), nin_block(384, 10, kernel_size=3, strides=1, padding=1), nn.AdaptiveAvgPool2D((1, 1)), nn.Flatten()) X = paddle.rand(shape=(1, 1, 224, 224)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
import tensorflow as tf from d2l import tensorflow as d2l class Inception(tf.keras.Model): def __init__(self, c1, c2, c3, c4): super().__init__() self.p1_1 = tf.keras.layers.Conv2D(c1, 1, activation='relu') self.p2_1 = tf.keras.layers.Conv2D(c2[0], 1, activation='relu') self.p2_2 = tf.keras.layers.Conv2D(c2[1], 3, padding='same', activation='relu') self.p3_1 = tf.keras.layers.Conv2D(c3[0], 1, activation='relu') self.p3_2 = tf.keras.layers.Conv2D(c3[1], 5, padding='same', activation='relu') self.p4_1 = tf.keras.layers.MaxPool2D(3, 1, padding='same') self.p4_2 = tf.keras.layers.Conv2D(c4, 1, activation='relu') def call(self, x): p1 = self.p1_1(x) p2 = self.p2_2(self.p2_1(x)) p3 = self.p3_2(self.p3_1(x)) p4 = self.p4_2(self.p4_1(x)) return tf.keras.layers.Concatenate()([p1, p2, p3, p4]) def b1(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(64, 7, strides=2, padding='same', activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) def b2(): return tf.keras.Sequential([ tf.keras.layers.Conv2D(64, 1, activation='relu'), tf.keras.layers.Conv2D(192, 3, padding='same', activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) def b3(): return tf.keras.models.Sequential([ Inception(64, (96, 128), (16, 32), 32), Inception(128, (128, 192), (32, 96), 64), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) def b4(): return tf.keras.Sequential([ Inception(192, (96, 208), (16, 48), 64), Inception(160, (112, 224), (24, 64), 64), Inception(128, (128, 256), (24, 64), 64), Inception(112, (144, 288), (32, 64), 64), Inception(256, (160, 320), (32, 128), 128), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) def b5(): return tf.keras.Sequential([ Inception(256, (160, 320), (32, 128), 128), Inception(384, (192, 384), (48, 128), 128), tf.keras.layers.GlobalAvgPool2D(), tf.keras.layers.Flatten() ]) def net(): return tf.keras.Sequential([b1(), b2(), b3(), b4(), b5(), tf.keras.layers.Dense(10)]) X = tf.random.uniform(shape=(1, 96, 96, 1)) for layer in net().layers: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn import paddle.nn.functional as F class Inception(nn.Layer): def __init__(self, in_channels, c1, c2, c3, c4, **kwargs): super(Inception, self).__init__(**kwargs) self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1) self.p2_1 = nn.Conv2D(in_channels, c2[0], kernel_size=1) self.p2_2 = nn.Conv2D(c2[0], c2[1], kernel_size=3, padding=1) self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1) self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2) self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1) self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1) def forward(self, x): p1 = F.relu(self.p1_1(x)) p2 = F.relu(self.p2_2(F.relu(self.p2_1(x)))) p3 = F.relu(self.p3_2(F.relu(self.p3_1(x)))) p4 = F.relu(self.p4_2(self.p4_1(x))) return paddle.concat(x=[p1, p2, p3, p4], axis=1) b1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2,padding=1)) b2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1), nn.ReLU(), nn.Conv2D(64, 192, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32), Inception(256, 128, (128, 192), (32, 96), 64), nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64), Inception(512, 160, (112, 224), (24, 64), 64), Inception(512, 128, (128, 256), (24, 64), 64), Inception(512, 112, (144, 288), (32, 64), 64), Inception(528, 256, (160, 320), (32, 128), 128), nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128), Inception(832, 384, (192, 384), (48, 128), 128), nn.AdaptiveAvgPool2D((1, 1)), nn.Flatten()) net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10)) X = paddle.rand(shape=(1, 1, 96, 96)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
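The in_channels arguments of the Inception blocks above follow from concatenating the four branches, whose widths add up to c1 + c2[1] + c3[1] + c4. A small bookkeeping helper makes that explicit:

def inception_out_channels(c1, c2, c3, c4):
    # the four parallel branches are concatenated along the channel axis
    return c1 + c2[1] + c3[1] + c4

print(inception_out_channels(64, (96, 128), (16, 32), 32))    # 256: input of b3's second block
print(inception_out_channels(128, (128, 192), (32, 96), 64))  # 480: input of b4's first block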
import tensorflow as tf from d2l import tensorflow as d2l def batch_norm(X, gamma, beta, moving_mean, moving_var, eps): inv = tf.cast(tf.math.rsqrt(moving_var + eps), X.dtype) inv *= gamma Y = X * inv + (beta - moving_mean * inv) return Y class BatchNorm(tf.keras.layers.Layer): def __init__(self, **kwargs): super(BatchNorm, self).__init__(**kwargs) def build(self, input_shape): weight_shape = [input_shape[-1], ] self.gamma = self.add_weight(name='gamma', shape=weight_shape, initializer=tf.initializers.ones, trainable=True) self.beta = self.add_weight(name='beta', shape=weight_shape, initializer=tf.initializers.zeros, trainable=True) self.moving_mean = self.add_weight(name='moving_mean', shape=weight_shape, initializer=tf.initializers.zeros, trainable=False) self.moving_variance = self.add_weight(name='moving_variance', shape=weight_shape, initializer=tf.initializers.ones, trainable=False) super(BatchNorm, self).build(input_shape) def assign_moving_average(self, variable, value): momentum = 0.9 delta = variable * momentum + value * (1 - momentum) return variable.assign(delta) @tf.function def call(self, inputs, training): if training: axes = list(range(len(inputs.shape) - 1)) batch_mean = tf.reduce_mean(inputs, axes, keepdims=True) batch_variance = tf.reduce_mean(tf.math.squared_difference(inputs, tf.stop_gradient(batch_mean)), axes, keepdims=True) batch_mean = tf.squeeze(batch_mean, axes) batch_variance = tf.squeeze(batch_variance, axes) mean_update = self.assign_moving_average(self.moving_mean, batch_mean) variance_update = self.assign_moving_average(self.moving_variance, batch_variance) self.add_update(mean_update) self.add_update(variance_update) mean, variance = batch_mean, batch_variance else: mean, variance = self.moving_mean, self.moving_variance output = batch_norm(inputs, moving_mean=mean, moving_var=variance, beta=self.beta, gamma=self.gamma, eps=1e-5) return output def net(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)), BatchNorm(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Conv2D(filters=16, kernel_size=5), BatchNorm(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(120), BatchNorm(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.Dense(84), BatchNorm(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.Dense(10)] ) lr, num_epochs, batch_size = 1.0, 10, 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu()) tf.reshape(net.layers[1].gamma, (-1,)), tf.reshape(net.layers[1].beta, (-1,)) def net(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Conv2D(filters=16, kernel_size=5), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(120), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.Dense(84), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.Dense(10), ])
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True): if not is_training: X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5 else: assert len(X.shape) in (2, 4) if len(X.shape) == 2: mean = paddle.mean(X) var = paddle.mean(((X - mean) ** 2)) else: mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True) var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True) X_hat = (X - mean) / (var + eps) ** 0.5 moving_mean = momentum * moving_mean + (1.0 - momentum) * mean moving_var = momentum * moving_var + (1.0 - momentum) * var Y = gamma * X_hat + beta return Y, moving_mean, moving_var class BatchNorm(nn.Layer): def __init__(self, num_features, num_dims=4): super(BatchNorm, self).__init__() if num_dims == 2: shape = (1, num_features) else: shape = (1, num_features, 1, 1) self.gamma = self.create_parameter( attr=None, shape=shape, dtype='float32', is_bias=False, default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32'))) self.beta = self.create_parameter( attr=None, shape=shape, dtype='float32', is_bias=False, default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32'))) self.moving_mean = paddle.zeros(shape=shape, dtype='float32') self.moving_var = paddle.zeros(shape=shape, dtype='float32') def forward(self, X): Y, self.moving_mean, self.moving_var = batch_norm( X, self.gamma, self.beta, self.moving_mean, self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training) return Y net = nn.Sequential( nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(), nn.MaxPool2D(kernel_size=2, stride=2), nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(), nn.MaxPool2D(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(), nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(), nn.Linear(84, 10)) lr, num_epochs, batch_size = 1.0, 10, 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu()) param = net.parameters() print('gamma:', param[2].numpy().reshape(-1)) print('beta:', param[3].numpy().reshape(-1)) net = nn.Sequential( nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(), nn.MaxPool2D(kernel_size=2, stride=2), nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(), nn.MaxPool2D(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(), nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(), nn.Linear(84, 10))
import tensorflow as tf from d2l import tensorflow as d2l class Residual(tf.keras.Model): def __init__(self, num_channels, use_1x1conv=False, strides=1): super().__init__() self.conv1 = tf.keras.layers.Conv2D( num_channels, padding='same', kernel_size=3, strides=strides) self.conv2 = tf.keras.layers.Conv2D( num_channels, kernel_size=3, padding='same') self.conv3 = None if use_1x1conv: self.conv3 = tf.keras.layers.Conv2D( num_channels, kernel_size=1, strides=strides) self.bn1 = tf.keras.layers.BatchNormalization() self.bn2 = tf.keras.layers.BatchNormalization() def call(self, X): Y = tf.keras.activations.relu(self.bn1(self.conv1(X))) Y = self.bn2(self.conv2(Y)) if self.conv3 is not None: X = self.conv3(X) Y += X return tf.keras.activations.relu(Y) blk = Residual(3) X = tf.random.uniform((4, 6, 6, 3)) Y = blk(X) Y.shape blk = Residual(6, use_1x1conv=True, strides=2) blk(X).shape b1 = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) class ResnetBlock(tf.keras.layers.Layer): def __init__(self, num_channels, num_residuals, first_block=False, **kwargs): super(ResnetBlock, self).__init__(**kwargs) self.residual_layers = [] for i in range(num_residuals): if i == 0 and not first_block: self.residual_layers.append(Residual(num_channels, use_1x1conv=True, strides=2)) else: self.residual_layers.append(Residual(num_channels)) def call(self, X): for layer in self.residual_layers.layers: X = layer(X) return X b2 = ResnetBlock(64, 2, first_block=True) b3 = ResnetBlock(128, 2) b4 = ResnetBlock(256, 2) b5 = ResnetBlock(512, 2) def net(): return tf.keras.Sequential([ tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'), ResnetBlock(64, 2, first_block=True), ResnetBlock(128, 2), ResnetBlock(256, 2), ResnetBlock(512, 2), tf.keras.layers.GlobalAvgPool2D(), tf.keras.layers.Dense(units=10)]) X = tf.random.uniform(shape=(1, 224, 224, 1)) for layer in net().layers: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn from paddle.nn import functional as F class Residual(nn.Layer): def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1): super(Residual, self).__init__() self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides) self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1) if use_1x1conv: self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides) else: self.conv3 = None self.bn1 = nn.BatchNorm2D(num_channels) self.bn2 = nn.BatchNorm2D(num_channels) self.relu = nn.ReLU() def forward(self, X): Y = F.relu(self.bn1(self.conv1(X))) Y = self.bn2(self.conv2(Y)) if self.conv3: X = self.conv3(X) Y += X return F.relu(Y) blk = Residual(3, 3) X = paddle.rand([4, 3, 6, 6]) Y = blk(X) Y.shape blk = Residual(3, 6, use_1x1conv=True, strides=2) blk(X).shape b1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3), nn.BatchNorm2D(64), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) def resnet_block(input_channels, num_channels, num_residuals, first_block=False): blk = [] for i in range(num_residuals): if i == 0 and not first_block: blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2)) else: blk.append(Residual(num_channels, num_channels)) return blk b2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True)) b3 = nn.Sequential(*resnet_block(64, 128, 2)) b4 = nn.Sequential(*resnet_block(128, 256, 2)) b5 = nn.Sequential(*resnet_block(256, 512, 2)) net = nn.Sequential(b1, b2, b3, b4, b5, nn.AdaptiveAvgPool2D((1, 1)), nn.Flatten(), nn.Linear(512, 10)) X = paddle.rand(shape=(1, 1, 224, 224)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
import tensorflow as tf from d2l import tensorflow as d2l class ConvBlock(tf.keras.layers.Layer): def __init__(self, num_channels): super(ConvBlock, self).__init__() self.bn = tf.keras.layers.BatchNormalization() self.relu = tf.keras.layers.ReLU() self.conv = tf.keras.layers.Conv2D(filters=num_channels, kernel_size=(3, 3), padding='same') self.listLayers = [self.bn, self.relu, self.conv] def call(self, x): y = x for layer in self.listLayers.layers: y = layer(y) y = tf.keras.layers.concatenate([x,y], axis=-1) return y class DenseBlock(tf.keras.layers.Layer): def __init__(self, num_convs, num_channels): super(DenseBlock, self).__init__() self.listLayers = [] for _ in range(num_convs): self.listLayers.append(ConvBlock(num_channels)) def call(self, x): for layer in self.listLayers.layers: x = layer(x) return x blk = DenseBlock(2, 10) X = tf.random.uniform((4, 8, 8, 3)) Y = blk(X) Y.shape class TransitionBlock(tf.keras.layers.Layer): def __init__(self, num_channels, **kwargs): super(TransitionBlock, self).__init__(**kwargs) self.batch_norm = tf.keras.layers.BatchNormalization() self.relu = tf.keras.layers.ReLU() self.conv = tf.keras.layers.Conv2D(num_channels, kernel_size=1) self.avg_pool = tf.keras.layers.AvgPool2D(pool_size=2, strides=2) def call(self, x): x = self.batch_norm(x) x = self.relu(x) x = self.conv(x) return self.avg_pool(x) blk = TransitionBlock(10) blk(Y).shape def block_1(): return tf.keras.Sequential([ tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'), tf.keras.layers.BatchNormalization(), tf.keras.layers.ReLU(), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) def block_2(): net = block_1() num_channels, growth_rate = 64, 32 num_convs_in_dense_blocks = [4, 4, 4, 4] for i, num_convs in enumerate(num_convs_in_dense_blocks): net.add(DenseBlock(num_convs, growth_rate)) num_channels += num_convs * growth_rate if i != len(num_convs_in_dense_blocks) - 1: num_channels //= 2 net.add(TransitionBlock(num_channels)) return net def net(): net = block_2() net.add(tf.keras.layers.BatchNormalization()) net.add(tf.keras.layers.ReLU()) net.add(tf.keras.layers.GlobalAvgPool2D()) net.add(tf.keras.layers.Flatten()) net.add(tf.keras.layers.Dense(10)) return net
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def conv_block(input_channels, num_channels): return nn.Sequential( nn.BatchNorm2D(input_channels), nn.ReLU(), nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1)) class DenseBlock(nn.Layer): def __init__(self, num_convs, input_channels, num_channels): super(DenseBlock, self).__init__() layer = [] for i in range(num_convs): layer.append(conv_block(num_channels * i + input_channels, num_channels)) self.net = nn.Sequential(*layer) def forward(self, X): for blk in self.net: Y = blk(X) X = paddle.concat(x=[X, Y], axis=1) return X blk = DenseBlock(2, 3, 10) X = paddle.randn([4, 3, 8, 8]) Y = blk(X) Y.shape def transition_block(input_channels, num_channels): return nn.Sequential( nn.BatchNorm2D(input_channels), nn.ReLU(), nn.Conv2D(input_channels, num_channels, kernel_size=1), nn.AvgPool2D(kernel_size=2, stride=2)) blk = transition_block(23, 10) blk(Y).shape b1 = nn.Sequential( nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3), nn.BatchNorm2D(64), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) num_channels, growth_rate = 64, 32 num_convs_in_dense_blocks = [4, 4, 4, 4] blks = [] for i, num_convs in enumerate(num_convs_in_dense_blocks): blks.append(DenseBlock(num_convs, num_channels, growth_rate)) num_channels += num_convs * growth_rate if i != len(num_convs_in_dense_blocks) - 1: blks.append(transition_block(num_channels, num_channels // 2)) num_channels = num_channels // 2 net = nn.Sequential( b1, *blks, nn.BatchNorm2D(num_channels), nn.ReLU(), nn.AdaptiveMaxPool2D((1, 1)), nn.Flatten(), nn.Linear(num_channels, 10))
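The channel counts in the DenseNet above can be tracked with the same kind of bookkeeping: a dense block with num_convs convolutions adds num_convs * growth_rate channels to its input, which is why the transition block after the toy example takes 23 channels.

def dense_out_channels(input_channels, num_convs, growth_rate):
    # every conv_block appends growth_rate channels via concatenation
    return input_channels + num_convs * growth_rate

print(dense_out_channels(3, 2, 10))   # 23, matching transition_block(23, 10) above
print(dense_out_channels(64, 4, 32))  # 192, after the first dense block of the full net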
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l T = 1000 time = tf.range(1, T + 1, dtype=tf.float32) x = tf.sin(0.01 * time) + tf.random.normal([T], 0, 0.2) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) tau = 4 features = tf.Variable(tf.zeros((T - tau, tau))) for i in range(tau): features[:, i].assign(x[i: T - tau + i]) labels = tf.reshape(x[tau:], (-1, 1)) batch_size, n_train = 16, 600 train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True) def get_net(): net = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='relu'), tf.keras.layers.Dense(1)]) return net loss = tf.keras.losses.MeanSquaredError() def train(net, train_iter, loss, epochs, lr): trainer = tf.keras.optimizers.Adam() for epoch in range(epochs): for X, y in train_iter: with tf.GradientTape() as g: out = net(X) l = loss(y, out) params = net.trainable_variables grads = g.gradient(l, params) trainer.apply_gradients(zip(grads, params)) net = get_net() train(net, train_iter, loss, 5, 0.01) onestep_preds = net(features) d2l.plot([time, time[tau:]], [x.numpy(), onestep_preds.numpy()], 'time', 'x', legend=['data', '1-step preds'], xlim=[1, 1000], figsize=(6, 3)) multistep_preds = tf.Variable(tf.zeros(T)) multistep_preds[:n_train + tau].assign(x[:n_train + tau]) for i in range(n_train + tau, T): multistep_preds[i].assign(tf.reshape(net(tf.reshape(multistep_preds[i - tau: i], (1, -1))), ())) d2l.plot([time, time[tau:], time[n_train + tau:]], [x.numpy(), onestep_preds.numpy(), multistep_preds[n_train + tau:].numpy()], 'time', 'x', legend=['data', '1-step preds', 'multistep preds'], xlim=[1, 1000], figsize=(6, 3)) max_steps = 64 features = tf.Variable(tf.zeros((T - tau - max_steps + 1, tau + max_steps))) for i in range(tau): features[:, i].assign(x[i: i + T - tau - max_steps + 1].numpy()) for i in range(tau, tau + max_steps): features[:, i].assign(tf.reshape(net((features[:, i - tau: i])), -1)) steps = (1, 4, 16, 64) d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps], [features[:, tau + i - 1].numpy() for i in steps], 'time', 'x', legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000], figsize=(6, 3))
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn T = 1000 time = paddle.arange(1, T + 1, dtype=paddle.float32) x = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,)) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) tau = 4 features = paddle.zeros((T - tau, tau)) for i in range(tau): features[:, i] = x[i: T - tau + i] labels = x[tau:].reshape((-1, 1)) batch_size, n_train = 16, 600 train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True) def init_weights(m): if type(m) == nn.Linear: nn.initializer.XavierUniform(m.weight) def get_net(): net = nn.Sequential(nn.Linear(4, 10), nn.ReLU(), nn.Linear(10, 1)) net.apply(init_weights) return net loss = nn.MSELoss(reduction='none') def train(net, train_iter, loss, epochs, lr): trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters()) for epoch in range(epochs): for i,(X, y) in enumerate (train_iter()): trainer.clear_grad() l = loss(net(X), y) l.sum().backward() trainer.step() net = get_net() train(net, train_iter, loss, 5, 0.01) onestep_preds = net(features) d2l.plot([time, time[tau:]], [x.detach().numpy(), onestep_preds.detach().numpy()], 'time', 'x', legend=['data', '1-step preds'], xlim=[1, 1000], figsize=(6, 3)) multistep_preds = paddle.zeros([T]) multistep_preds[: n_train + tau] = x[: n_train + tau] for i in range(n_train + tau, T): multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1))) d2l.plot([time, time[tau:], time[n_train + tau:]], [x.detach().numpy(), onestep_preds.detach().numpy(), multistep_preds[n_train + tau:].detach().numpy()], 'time', 'x', legend=['data', '1-step preds', 'multistep preds'], xlim=[1, 1000], figsize=(6, 3)) max_steps = 64 features = paddle.zeros((T - tau - max_steps + 1, tau + max_steps)) for i in range(tau): features[:, i] = x[i: i + T - tau - max_steps + 1] for i in range(tau, tau + max_steps): features[:, i] = net(features[:, i - tau:i]).reshape([-1]) steps = (1, 4, 16, 64) d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps], [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x', legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000], figsize=(6, 3))
import collections import re from d2l import tensorflow as d2l def tokenize(lines, token='word'): if token == 'word': return [line.split() for line in lines] elif token == 'char': return [list(line) for line in lines] else: print('Error: Unknown word element type:' + token) tokens = tokenize(lines) for i in range(11): print(tokens[i]) def load_corpus_time_machine(max_tokens=-1): lines = read_time_machine() tokens = tokenize(lines, 'char') vocab = Vocab(tokens) corpus = [vocab[token] for line in tokens for token in line] if max_tokens > 0: corpus = corpus[:max_tokens] return corpus, vocab corpus, vocab = load_corpus_time_machine() len(corpus), len(vocab)
import collections import re from d2l import paddle as d2l def tokenize(lines, token='word'): if token == 'word': return [line.split() for line in lines] elif token == 'char': return [list(line) for line in lines] else: print('Error: Unknown word element type:' + token) tokens = tokenize(lines) for i in range(11): print(tokens[i]) def load_corpus_time_machine(max_tokens=-1): lines = read_time_machine() tokens = tokenize(lines, 'char') vocab = Vocab(tokens) corpus = [vocab[token] for line in tokens for token in line] if max_tokens > 0: corpus = corpus[:max_tokens] return corpus, vocab corpus, vocab = load_corpus_time_machine() len(corpus), len(vocab)
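A tiny usage sketch for the tokenize function above; the two input strings are made up for illustration only.

lines = ['the time machine', 'by h g wells']
print(tokenize(lines))             # word tokens: [['the', 'time', 'machine'], ['by', 'h', 'g', 'wells']]
print(tokenize(lines, 'char')[0])  # character tokens of the first line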
import random import tensorflow as tf from d2l import tensorflow as d2l tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): corpus = corpus[random.randint(0, num_steps - 1):] num_subseqs = (len(corpus) - 1) // num_steps initial_indices = list(range(0, num_subseqs * num_steps, num_steps)) random.shuffle(initial_indices) def data(pos): return corpus[pos: pos + num_steps] num_batches = num_subseqs // batch_size for i in range(0, batch_size * num_batches, batch_size): initial_indices_per_batch = initial_indices[i: i + batch_size] X = [data(j) for j in initial_indices_per_batch] Y = [data(j + 1) for j in initial_indices_per_batch] yield tf.constant(X), tf.constant(Y) def seq_data_iter_sequential(corpus, batch_size, num_steps): offset = random.randint(0, num_steps) num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size Xs = tf.constant(corpus[offset: offset + num_tokens]) Ys = tf.constant(corpus[offset + 1: offset + 1 + num_tokens]) Xs = tf.reshape(Xs, (batch_size, -1)) Ys = tf.reshape(Ys, (batch_size, -1)) num_batches = Xs.shape[1] // num_steps for i in range(0, num_batches * num_steps, num_steps): X = Xs[:, i: i + num_steps] Y = Ys[:, i: i + num_steps] yield X, Y
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import random import paddle tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): corpus = corpus[random.randint(0, num_steps - 1):] num_subseqs = (len(corpus) - 1) // num_steps initial_indices = list(range(0, num_subseqs * num_steps, num_steps)) random.shuffle(initial_indices) def data(pos): return corpus[pos: pos + num_steps] num_batches = num_subseqs // batch_size for i in range(0, batch_size * num_batches, batch_size): initial_indices_per_batch = initial_indices[i: i + batch_size] X = [data(j) for j in initial_indices_per_batch] Y = [data(j + 1) for j in initial_indices_per_batch] yield paddle.to_tensor(X), paddle.to_tensor(Y) def seq_data_iter_sequential(corpus, batch_size, num_steps): offset = random.randint(0, num_steps) num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size Xs = paddle.to_tensor(corpus[offset: offset + num_tokens]) Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens]) Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1)) num_batches = Xs.shape[1] // num_steps for i in range(0, num_steps * num_batches, num_steps): X = Xs[:, i: i + num_steps] Y = Ys[:, i: i + num_steps] yield X, Y
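Both minibatch samplers above can be exercised on a toy corpus; the batch_size and num_steps below are illustrative.

my_seq = list(range(35))
for X, Y in seq_data_iter_random(my_seq, batch_size=2, num_steps=5):
    print('random sampling   X:', X.numpy().tolist(), 'Y:', Y.numpy().tolist())
    break
for X, Y in seq_data_iter_sequential(my_seq, batch_size=2, num_steps=5):
    print('sequential slices X:', X.numpy().tolist(), 'Y:', Y.numpy().tolist())
    break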
import tensorflow as tf from d2l import tensorflow as d2l X, W_xh = tf.random.normal((3, 1), 0, 1), tf.random.normal((1, 4), 0, 1) H, W_hh = tf.random.normal((3, 4), 0, 1), tf.random.normal((4, 4), 0, 1) tf.matmul(X, W_xh) + tf.matmul(H, W_hh) tf.matmul(tf.concat((X, H), 1), tf.concat((W_xh, W_hh), 0))
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle X, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4)) H, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4)) paddle.matmul(X, W_xh) + paddle.matmul(H, W_hh) paddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))
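The two products above should agree up to floating-point error, since concatenating [X, H] along the columns and stacking [W_xh; W_hh] along the rows computes X @ W_xh + H @ W_hh in a single matrix product. A quick check, assuming the tensors from the block above:

A = paddle.matmul(X, W_xh) + paddle.matmul(H, W_hh)
B = paddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))
print(float(paddle.abs(A - B).max()))  # expected to be ~1e-6 or smaller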
%matplotlib inline import math import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) train_random_iter, vocab_random_iter = d2l.load_data_time_machine(batch_size, num_steps, use_random_iter=True) tf.one_hot(tf.constant([0, 2]), len(vocab)) X = tf.reshape(tf.range(10), (2, 5)) tf.one_hot(tf.transpose(X), 28).shape def get_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32) W_xh = tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32) W_hh = tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32) b_h = tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32) W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32) b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32) params = [W_xh, W_hh, b_h, W_hq, b_q] return params def init_rnn_state(batch_size, num_hiddens): return (tf.zeros((batch_size, num_hiddens)), ) def rnn(inputs, state, params): W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: X = tf.reshape(X,[-1,W_xh.shape[0]]) H = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(H, W_hh) + b_h) Y = tf.matmul(H, W_hq) + b_q outputs.append(Y) return tf.concat(outputs, axis=0), (H,) class RNNModelScratch: def __init__(self, vocab_size, num_hiddens, init_state, forward_fn, get_params): self.vocab_size, self.num_hiddens = vocab_size, num_hiddens self.init_state, self.forward_fn = init_state, forward_fn self.trainable_variables = get_params(vocab_size, num_hiddens) def __call__(self, X, state): X = tf.one_hot(tf.transpose(X), self.vocab_size) X = tf.cast(X, tf.float32) return self.forward_fn(X, state, self.trainable_variables) def begin_state(self, batch_size, *args, **kwargs): return self.init_state(batch_size, self.num_hiddens) device_name = d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) num_hiddens = 512 with strategy.scope(): net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params) state = net.begin_state(X.shape[0]) Y, new_state = net(X, state) Y.shape, len(new_state), new_state[0].shape def predict_ch8(prefix, num_preds, net, vocab): state = net.begin_state(batch_size=1, dtype=tf.float32) outputs = [vocab[prefix[0]]] get_input = lambda: tf.reshape(tf.constant([outputs[-1]]), (1, 1)).numpy() for y in prefix[1:]: _, state = net(get_input(), state) outputs.append(vocab[y]) for _ in range(num_preds): y, state = net(get_input(), state) outputs.append(int(y.numpy().argmax(axis=1).reshape(1))) return ''.join([vocab.idx_to_token[i] for i in outputs]) predict_ch8('time traveller ', 10, net, vocab) def grad_clipping(grads, theta): theta = tf.constant(theta, dtype=tf.float32) new_grad = [] for grad in grads: if isinstance(grad, tf.IndexedSlices): new_grad.append(tf.convert_to_tensor(grad)) else: new_grad.append(grad) norm = tf.math.sqrt(sum((tf.reduce_sum(grad ** 2)).numpy() for grad in new_grad)) norm = tf.cast(norm, tf.float32) if tf.greater(norm, theta): for i, grad in enumerate(new_grad): new_grad[i] = grad * theta / norm else: new_grad = new_grad return new_grad def train_epoch_ch8(net, train_iter, loss, updater, use_random_iter): state, timer = None, d2l.Timer() metric = d2l.Accumulator(2) for X, Y in train_iter: if state is None or use_random_iter: state = net.begin_state(batch_size=X.shape[0], dtype=tf.float32) with tf.GradientTape(persistent=True) as g: y_hat, state = 
net(X, state) y = tf.reshape(tf.transpose(Y), (-1)) l = loss(y, y_hat) params = net.trainable_variables grads = g.gradient(l, params) grads = grad_clipping(grads, 1) updater.apply_gradients(zip(grads, params)) metric.add(l * d2l.size(y), d2l.size(y)) return math.exp(metric[0] / metric[1]), metric[1] / timer.stop() def train_ch8(net, train_iter, vocab, lr, num_epochs, strategy, use_random_iter=False): with strategy.scope(): loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) updater = tf.keras.optimizers.SGD(lr) animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs]) predict = lambda prefix: predict_ch8(prefix, 50, net, vocab) for epoch in range(num_epochs): ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, use_random_iter) if (epoch + 1) % 10 == 0: animator.add(epoch + 1, [ppl]) device = d2l.try_gpu()._device_name num_epochs, lr = 500, 1 train_ch8(net, train_iter, vocab, lr, num_epochs, strategy) with strategy.scope(): net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params) train_ch8(net, train_iter, vocab_random_iter, lr, num_epochs, strategy, use_random_iter=True)
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import math import paddle from paddle import nn from paddle.nn import functional as F batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) F.one_hot(paddle.to_tensor([0, 2]), len(vocab)) X = paddle.arange(10).reshape((2, 5)) F.one_hot(X.T, 28).shape def get_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return paddle.randn(shape=shape)* 0.01 W_xh = normal([num_inputs, num_hiddens]) W_hh = normal([num_hiddens, num_hiddens]) b_h = paddle.zeros(shape=[num_hiddens]) W_hq = normal([num_hiddens, num_outputs]) b_q = paddle.zeros(shape=[num_outputs]) params = [W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.stop_gradient=False return params def init_rnn_state(batch_size, num_hiddens): return (paddle.zeros(shape=[batch_size, num_hiddens]), ) def rnn(inputs, state, params): W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: H = paddle.tanh(paddle.mm(X, W_xh) + paddle.mm(H, W_hh) + b_h) Y = paddle.mm(H, W_hq) + b_q outputs.append(Y) return paddle.concat(x=outputs, axis=0), (H,) class RNNModelScratch: def __init__(self, vocab_size, num_hiddens, get_params, init_state, forward_fn): self.vocab_size, self.num_hiddens = vocab_size, num_hiddens self.params = get_params(vocab_size, num_hiddens) self.init_state, self.forward_fn = init_state, forward_fn def __call__(self, X, state): X = F.one_hot(X.T, self.vocab_size) return self.forward_fn(X, state, self.params) def begin_state(self, batch_size): return self.init_state(batch_size, self.num_hiddens) num_hiddens = 512 net = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn) state = net.begin_state(X.shape[0]) Y, new_state = net(X, state) Y.shape, len(new_state), new_state[0].shape def predict_ch8(prefix, num_preds, net, vocab, device): state = net.begin_state(batch_size=1) outputs = [vocab[prefix[0]]] get_input = lambda: paddle.to_tensor(outputs[-1], place=device).reshape((1, 1)) for y in prefix[1:]: _, state = net(get_input(), state) outputs.append(vocab[y]) for _ in range(num_preds): y, state = net(get_input(), state) outputs.append(int(paddle.reshape(paddle.argmax(y,axis=1),shape=[1]))) return ''.join([vocab.idx_to_token[i] for i in outputs]) predict_ch8('time traveller ', 10, net, vocab, d2l.try_gpu()) def grad_clipping(net, theta): if isinstance(net, nn.Layer): params = [p for p in net.parameters() if not p.stop_gradient] else: params = net.params norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params)) if norm > theta: with paddle.no_grad(): for param in params: param.grad.set_value(param.grad * theta / norm) def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter): state, timer = None, d2l.Timer() metric = d2l.Accumulator(2) for X, Y in train_iter: if state is None or use_random_iter: state = net.begin_state(batch_size=X.shape[0]) else: if isinstance(net, nn.Layer) and not isinstance(state, tuple): state.stop_gradient=True else: for s in state: s.stop_gradient=True y = paddle.reshape(Y.T,shape=[-1]) X = paddle.to_tensor(X, place=device) y = paddle.to_tensor(y, place=device) y_hat, state = net(X, state) l = loss(y_hat, y).mean() if isinstance(updater, paddle.optimizer.Optimizer): updater.clear_grad() l.backward() grad_clipping(net, 1) updater.step() else: l.backward() grad_clipping(net, 1) updater(batch_size=1) metric.add(l * y.numel(), y.numel()) return math.exp(metric[0] / 
metric[1]), metric[1] / timer.stop() def train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False): loss = nn.CrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs]) if isinstance(net, nn.Layer): updater = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters()) else: updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size) predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device) for epoch in range(num_epochs): ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter) if (epoch + 1) % 10 == 0: animator.add(epoch + 1, [ppl]) num_epochs, lr = 500, 1 train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu()) net = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn) train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)
import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_cell = tf.keras.layers.SimpleRNNCell(num_hiddens, kernel_initializer='glorot_uniform') rnn_layer = tf.keras.layers.RNN(rnn_cell, time_major=True, return_sequences=True, return_state=True) state = rnn_cell.get_initial_state(batch_size=batch_size, dtype=tf.float32) state.shape X = tf.random.uniform((num_steps, batch_size, len(vocab))) Y, state_new = rnn_layer(X, state) Y.shape, len(state_new), state_new[0].shape class RNNModel(tf.keras.layers.Layer): def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.dense = tf.keras.layers.Dense(vocab_size) def call(self, inputs, state): X = tf.one_hot(tf.transpose(inputs), self.vocab_size) Y, *state = self.rnn(X, state) output = self.dense(tf.reshape(Y, (-1, Y.shape[-1]))) return output, state def begin_state(self, *args, **kwargs): return self.rnn.cell.get_initial_state(*args, **kwargs) device_name = d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) with strategy.scope(): net = RNNModel(rnn_layer, vocab_size=len(vocab)) d2l.predict_ch8('time traveller', 10, net, vocab) num_epochs, lr = 500, 1 d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, strategy)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn from paddle.nn import functional as F batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True) state = paddle.zeros(shape=[1, batch_size, num_hiddens]) state.shape X = paddle.rand(shape=[num_steps, batch_size, len(vocab)]) Y, state_new = rnn_layer(X, state) Y.shape, state_new.shape class RNNModel(nn.Layer): def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.num_hiddens = self.rnn.hidden_size if self.rnn.num_directions==1: self.num_directions = 1 self.linear = nn.Linear(self.num_hiddens, self.vocab_size) else: self.num_directions = 2 self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size) def forward(self, inputs, state): X = F.one_hot(inputs.T, self.vocab_size) Y, state = self.rnn(X, state) output = self.linear(Y.reshape((-1, Y.shape[-1]))) return output, state def begin_state(self, batch_size=1): if not isinstance(self.rnn, nn.LSTM): return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]) else: return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]), paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens])) device = d2l.try_gpu() net = RNNModel(rnn_layer, vocab_size=len(vocab)) d2l.predict_ch8('time traveller', 10, net, vocab, device) num_epochs, lr = 500, 1.0 d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)
45
import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32) def three(): return (tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32), tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32)) W_xz, W_hz, b_z = three() W_xr, W_hr, b_r = three() W_xh, W_hh, b_h = three() W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32) b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32) params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q] return params def init_gru_state(batch_size, num_hiddens): return (tf.zeros((batch_size, num_hiddens)), ) def gru(inputs, state, params): W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: X = tf.reshape(X,[-1,W_xh.shape[0]]) Z = tf.sigmoid(tf.matmul(X, W_xz) + tf.matmul(H, W_hz) + b_z) R = tf.sigmoid(tf.matmul(X, W_xr) + tf.matmul(H, W_hr) + b_r) H_tilda = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(R * H, W_hh) + b_h) H = Z * H + (1 - Z) * H_tilda Y = tf.matmul(H, W_hq) + b_q outputs.append(Y) return tf.concat(outputs, axis=0), (H,) vocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) num_epochs, lr = 500, 1 with strategy.scope(): model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_gru_state, gru, get_params) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy) gru_cell = tf.keras.layers.GRUCell(num_hiddens, kernel_initializer='glorot_uniform') gru_layer = tf.keras.layers.RNN(gru_cell, time_major=True, return_sequences=True, return_state=True) device_name = d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) with strategy.scope(): model = d2l.RNNModel(gru_layer, vocab_size=len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)
null
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn.functional as F from paddle import nn batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return paddle.randn(shape=shape)*0.01 def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens])) W_xz, W_hz, b_z = three() W_xr, W_hr, b_r = three() W_xh, W_hh, b_h = three() W_hq = normal((num_hiddens, num_outputs)) b_q = paddle.zeros([num_outputs]) params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.stop_gradient = False return params def init_gru_state(batch_size, num_hiddens): return (paddle.zeros([batch_size, num_hiddens]), ) def gru(inputs, state, params): W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params H,*_ = state outputs = [] for X in inputs: Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z) R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r) H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h) H = Z * H + (1 - Z) * H_tilda Y = H @ W_hq + b_q outputs.append(Y) return paddle.concat(outputs, axis=0), (H,*_) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1.0 model = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params, init_gru_state, gru) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) num_inputs = vocab_size gru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True) model = d2l.RNNModel(gru_layer, len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
46
import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_lstm_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return tf.Variable(tf.random.normal(shape=shape, stddev=0.01, mean=0, dtype=tf.float32)) def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32)) W_xi, W_hi, b_i = three() W_xf, W_hf, b_f = three() W_xo, W_ho, b_o = three() W_xc, W_hc, b_c = three() W_hq = normal((num_hiddens, num_outputs)) b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32) params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] return params def init_lstm_state(batch_size, num_hiddens): return (tf.zeros(shape=(batch_size, num_hiddens)), tf.zeros(shape=(batch_size, num_hiddens))) def lstm(inputs, state, params): W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q = params (H, C) = state outputs = [] for X in inputs: X=tf.reshape(X,[-1,W_xi.shape[0]]) I = tf.sigmoid(tf.matmul(X, W_xi) + tf.matmul(H, W_hi) + b_i) F = tf.sigmoid(tf.matmul(X, W_xf) + tf.matmul(H, W_hf) + b_f) O = tf.sigmoid(tf.matmul(X, W_xo) + tf.matmul(H, W_ho) + b_o) C_tilda = tf.tanh(tf.matmul(X, W_xc) + tf.matmul(H, W_hc) + b_c) C = F * C + I * C_tilda H = O * tf.tanh(C) Y = tf.matmul(H, W_hq) + b_q outputs.append(Y) return tf.concat(outputs, axis=0), (H,C) vocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name num_epochs, lr = 500, 1 strategy = tf.distribute.OneDeviceStrategy(device_name) with strategy.scope(): model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_lstm_state, lstm, get_lstm_params) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy) lstm_cell = tf.keras.layers.LSTMCell(num_hiddens, kernel_initializer='glorot_uniform') lstm_layer = tf.keras.layers.RNN(lstm_cell, time_major=True, return_sequences=True, return_state=True) device_name = d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) with strategy.scope(): model = d2l.RNNModel(lstm_layer, vocab_size=len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)
null
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn.functional as Function from paddle import nn batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_lstm_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return paddle.randn(shape=shape)*0.01 def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens])) W_xi, W_hi, b_i = three() W_xf, W_hf, b_f = three() W_xo, W_ho, b_o = three() W_xc, W_hc, b_c = three() W_hq = normal((num_hiddens, num_outputs)) b_q = paddle.zeros([num_outputs]) params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] for param in params: param.stop_gradient = False return params def init_lstm_state(batch_size, num_hiddens): return (paddle.zeros([batch_size, num_hiddens]), paddle.zeros([batch_size, num_hiddens])) def lstm(inputs, state, params): [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] = params (H, C) = state outputs = [] for X in inputs: I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i) F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f) O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o) C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c) C = F * C + I * C_tilda H = O * paddle.tanh(C) Y = (H @ W_hq) + b_q outputs.append(Y) return paddle.concat(outputs, axis=0), (H, C) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1.0 model = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params, init_lstm_state, lstm) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) num_inputs = vocab_size lstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True) model = d2l.RNNModel(lstm_layer, len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
47
import os import tensorflow as tf from d2l import tensorflow as d2l def build_array_nmt(lines, vocab, num_steps): lines = [vocab[l] for l in lines] lines = [l + [vocab['<eos>']] for l in lines] array = tf.constant([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines]) valid_len = tf.reduce_sum( tf.cast(array != vocab['<pad>'], tf.int32), 1) return array, valid_len train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8) for X, X_valid_len, Y, Y_valid_len in train_iter: print('X:', tf.cast(X, tf.int32)) print('Valid length of X:', X_valid_len) print('Y:', tf.cast(Y, tf.int32)) print('Valid length of Y:', Y_valid_len) break
null
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import os import paddle def build_array_nmt(lines, vocab, num_steps): lines = [vocab[l] for l in lines] lines = [l + [vocab['<eos>']] for l in lines] array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines]) valid_len = (array != vocab['<pad>']).astype(paddle.int32).sum(1) return array, valid_len train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8) for X, X_valid_len, Y, Y_valid_len in train_iter: print('X:', X.astype(paddle.int32)) print('Valid length of X:', X_valid_len) print('Y:', Y.astype(paddle.int32)) print('Valid length of Y:', Y_valid_len) break
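Both records above call truncate_pad without defining it; it is assumed from the surrounding d2l utilities. A minimal framework-agnostic sketch of that helper, consistent with how it is used here:
def truncate_pad(line, num_steps, padding_token):
    """Truncate or pad a token-id sequence to exactly num_steps entries."""
    if len(line) > num_steps:
        return line[:num_steps]  # truncate long sequences
    return line + [padding_token] * (num_steps - len(line))  # pad short ones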
48
null
null
x = np.arange(12) x.size X = x.reshape(3, 4) np.zeros((2, 3, 4)) np.ones((2, 3, 4)) np.random.normal(0, 1, size=(3, 4)) np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = np.array([1, 2, 4, 8]) y = np.array([2, 2, 2, 2]) x + y, x - y, x * y, x / y, x ** y np.exp(x) X = np.arange(12).reshape(3, 4) Y = np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) np.concatenate([X, Y], axis=0), np.concatenate([X, Y], axis=1) a = np.arange(3).reshape(3, 1) b = np.arange(2).reshape(1, 2) Z = np.zeros_like(Y) Z[:] = X + Y A = X.asnumpy() B = np.array(A) a = np.array([3.5]) print(a, a.item(), float(a), int(a))
x = paddle.arange(12) x.numel() X = paddle.reshape(x, (3, 4)) paddle.zeros((2, 3, 4)) paddle.ones((2, 3, 4)) paddle.randn((3, 4),'float32') paddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = paddle.to_tensor([1.0, 2, 4, 8]) y = paddle.to_tensor([2, 2, 2, 2]) x + y, x - y, x * y, x / y, x**y paddle.exp(x) X = paddle.arange(12, dtype='float32').reshape((3, 4)) Y = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) paddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1) a = paddle.reshape(paddle.arange(3), (3, 1)) b = paddle.reshape(paddle.arange(2), (1, 2)) Z = paddle.zeros_like(Y) Z = X + Y A = X.numpy() B = paddle.to_tensor(A) type(A), type(B) a = paddle.to_tensor([3.5]) a, a.item(), float(a), int(a)
49
null
null
from mxnet import np X, y = np.array(inputs.values), np.array(outputs.values)
import warnings warnings.filterwarnings(action="ignore") import paddle X, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)
50
null
null
from mxnet import np, npx npx.set_np() x = np.array(3.0) y = np.array(2.0) print(x + y, x * y, x / y, x**y) x = np.arange(4) A = np.arange(20).reshape(5, 4) A.T B = np.array([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) B == B.T X = np.arange(24).reshape(2, 3, 4) A = np.arange(20).reshape(5, 4) B = A.copy() print(A, A + B) a = 2 X = np.arange(24).reshape(2, 3, 4) print(a + X, (a * X).shape) x = np.arange(4) print(x, x.sum()) a = A.sum() A.mean() A.sum() / A.size A.mean(axis=0) A.sum(axis=0) / A.shape[0] sum_A = A.sum(axis=1, keepdims=True) y = np.ones(4) print(np.dot(x, y)) np.sum(x * y) A.shape, x.shape, np.dot(A, x) B = np.ones(shape=(4, 3)) np.dot(A, B) u = np.array([3, -4]) np.linalg.norm(u) np.abs(u).sum() np.linalg.norm(np.ones((4, 9)))
import warnings warnings.filterwarnings(action="ignore") import paddle x = paddle.to_tensor([3.0]) y = paddle.to_tensor([2.0]) x + y, x * y, x / y, x**y x = paddle.arange(4) A = paddle.reshape(paddle.arange(20), (5, 4)) paddle.transpose(A, perm=[1, 0]) B = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) B == paddle.transpose(B, perm=[1, 0]) X = paddle.reshape(paddle.arange(24), (2, 3, 4)) A = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4)) B = A.clone() A, A + B a = 2 X = paddle.reshape(paddle.arange(24), (2, 3, 4)) a + X, (a * X).shape x = paddle.arange(4, dtype=paddle.float32) print(x, x.sum()) A.shape, A.sum() A.mean(), A.sum() / A.numel() A.mean(axis=0), A.sum(axis=0) / A.shape[0] sum_A = paddle.sum(A, axis=1, keepdim=True) y = paddle.ones(shape=[4], dtype='float32') x, y, paddle.dot(x, y) paddle.sum(x * y) A.shape, x.shape, paddle.mv(A, x) B = paddle.ones(shape=[4, 3], dtype='float32') paddle.mm(A, B) u = paddle.to_tensor([3.0, -4.0]) paddle.norm(u) paddle.abs(u).sum() paddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))
51
null
null
%matplotlib inline from matplotlib_inline import backend_inline from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() def f(x): return 3 * x ** 2 - 4 * x def numerical_lim(f, x, h): return (f(x + h) - f(x)) / h h = 0.1 for i in range(5): print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}') h *= 0.1
%matplotlib inline import numpy as np from matplotlib_inline import backend_inline from d2l import paddle as d2l def f(x): return 3 * x ** 2 - 4 * x def numerical_lim(f, x, h): return (f(x + h) - f(x)) / h h = 0.1 for i in range(5): print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}') h *= 0.1
52
null
null
from mxnet import autograd, np, npx npx.set_np() x = np.arange(4.0) x.attach_grad() x.grad with autograd.record(): y = 2 * np.dot(x, x) with autograd.record(): y = x.sum() y.backward() x.grad with autograd.record(): y = x * x y.backward() x.grad with autograd.record(): y = x * x u = y.detach() z = u * x z.backward() x.grad == u y.backward() x.grad == 2 * x def f(a): b = a * 2 while np.linalg.norm(b) < 1000: b = b * 2 if b.sum() > 0: c = b else: c = 100 * b return c a = np.random.normal() a.attach_grad() with autograd.record(): d = f(a) d.backward()
import warnings warnings.filterwarnings(action="ignore") import paddle x = paddle.arange(4, dtype='float32') x = paddle.to_tensor(x, stop_gradient=False) y = 2 * paddle.dot(x, x) x.clear_gradient() y = paddle.sum(x) y.backward() x.grad x.clear_gradient() y = x * x paddle.sum(y).backward() x.grad x.clear_gradient() y = x * x u = y.detach() z = u * x paddle.sum(z).backward() x.grad == u x.clear_gradient() paddle.sum(y).backward() x.grad == 2 * x def f(a): b = a * 2 while paddle.norm(b) < 1000: b = b * 2 if paddle.sum(b) > 0: c = b else: c = 100 * b return c a = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False) d = f(a) d.backward()
53
null
null
%matplotlib inline import random from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() fair_probs = [1.0 / 6] * 6 np.random.multinomial(1, fair_probs) np.random.multinomial(10, fair_probs) counts = np.random.multinomial(1000, fair_probs).astype(np.float32)
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import random import numpy as np import paddle fair_probs = [1.0 / 6] * 6 paddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample() paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample() counts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample() counts / 1000
54
null
null
counts = np.random.multinomial(10, fair_probs, size=500) cum_counts = counts.astype(np.float32).cumsum(axis=0) estimates = cum_counts / cum_counts.sum(axis=1, keepdims=True) d2l.set_figsize((6, 4.5)) for i in range(6): d2l.plt.plot(estimates[:, i].asnumpy(), label=("P(die=" + str(i + 1) + ")")) d2l.plt.axhline(y=0.167, color='black', linestyle='dashed') d2l.plt.gca().set_xlabel('Groups of experiments') d2l.plt.gca().set_ylabel('Estimated probability') d2l.plt.legend(); from mxnet import np a = dir(np.random) help(np.ones) np.ones(4)
counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1)) cum_counts = counts.cumsum(axis=0) cum_counts = cum_counts.squeeze(axis=1) estimates = cum_counts / cum_counts.sum(axis=1, keepdim=True) d2l.set_figsize((6, 4.5)) for i in range(6): d2l.plt.plot(estimates[:, i], label=("P(die=" + str(i + 1) + ")")) d2l.plt.axhline(y=0.167, color='black', linestyle='dashed') d2l.plt.gca().set_xlabel('Groups of experiments') d2l.plt.gca().set_ylabel('Estimated probability') d2l.plt.legend() import warnings warnings.filterwarnings(action="ignore") import paddle help(paddle.ones) paddle.ones([4], dtype='float32')
55
null
null
%matplotlib inline import math import time from mxnet import np from d2l import mxnet as d2l n = 10000 a = np.ones(n) b = np.ones(n) c = np.zeros(n) timer = Timer() for i in range(n): c[i] = a[i] + b[i] x = np.arange(-7, 7, 0.01) params = [(0, 1), (0, 2), (3, 1)] d2l.plot(x.asnumpy(), [normal(x, mu, sigma).asnumpy() for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import math import time import numpy as np import paddle n = 10000 a = paddle.ones([n]) b = paddle.ones([n]) c = paddle.zeros([n]) timer = Timer() for i in range(n): c[i] = a[i] + b[i] x = np.arange(-7, 7, 0.01) params = [(0, 1), (0, 2), (3, 1)] d2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])
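The benchmarking records above instantiate a Timer class and plot a normal probability-density helper, neither of which is defined in the snippets; both are assumed from the surrounding d2l context. A plausible minimal sketch:
import math
import time
import numpy as np

class Timer:
    """Record and report elapsed wall-clock times (assumed d2l-style helper)."""
    def __init__(self):
        self.times = []
        self.start()
    def start(self):
        self.tik = time.time()
    def stop(self):
        self.times.append(time.time() - self.tik)
        return self.times[-1]

def normal(x, mu, sigma):
    """Gaussian probability density used for the plotted curves."""
    p = 1 / math.sqrt(2 * math.pi * sigma**2)
    return p * np.exp(-0.5 / sigma**2 * (x - mu)**2)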
56
null
null
%matplotlib inline import random from mxnet import autograd, np, npx from d2l import mxnet as d2l npx.set_np() def synthetic_data(w, b, num_examples): X = np.random.normal(0, 1, (num_examples, len(w))) y = np.dot(X, w) + b y += np.random.normal(0, 0.01, y.shape) return X, y.reshape((-1, 1)) true_w = np.array([2, -3.4]) true_b = 4.2 features, labels = synthetic_data(true_w, true_b, 1000) d2l.set_figsize() d2l.plt.scatter(features[:, (1)].asnumpy(), labels.asnumpy(), 1); def data_iter(batch_size, features, labels): num_examples = len(features) indices = list(range(num_examples)) random.shuffle(indices) for i in range(0, num_examples, batch_size): batch_indices = np.array(indices[i: min(i + batch_size, num_examples)]) yield features[batch_indices], labels[batch_indices] batch_size = 10 for X, y in data_iter(batch_size, features, labels): print(X, ' ', y) break w = np.random.normal(0, 0.01, (2, 1)) b = np.zeros(1) w.attach_grad() b.attach_grad() def linreg(X, w, b): return np.dot(X, w) + b def sgd(params, lr, batch_size): for param in params: param[:] = param - lr * param.grad / batch_size lr = 0.03 num_epochs = 3 net = linreg loss = squared_loss for epoch in range(num_epochs): for X, y in data_iter(batch_size, features, labels): with autograd.record(): l = loss(net(X, w, b), y) l.backward() sgd([w, b], lr, batch_size) train_l = loss(net(features, w, b), labels)
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import random import paddle def synthetic_data(w, b, num_examples): X = paddle.normal(0, 1, (num_examples, len(w))) y = paddle.matmul(X, w) + b y += paddle.normal(0, 0.01, y.shape) return X, y.reshape((-1, 1)) true_w = paddle.to_tensor([2, -3.4]) true_b = 4.2 features, labels = synthetic_data(true_w, true_b, 1000) d2l.set_figsize() d2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1); def data_iter(batch_size, features, labels): num_examples = len(features) indices = list(range(num_examples)) random.shuffle(indices) for i in range(0, num_examples, batch_size): batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)]) yield features[batch_indices], labels[batch_indices] batch_size = 10 for X, y in data_iter(batch_size, features, labels): break w = paddle.normal(0, 0.01, shape=(2,1)) b = paddle.zeros(shape=[1]) w.stop_gradient = False b.stop_gradient = False def linreg(X, w, b): return paddle.matmul(X, w) + b def sgd(params, lr, batch_size): with paddle.no_grad(): for i, param in enumerate(params): param -= lr * params[i].grad / batch_size params[i].set_value(param) params[i].clear_gradient() lr = 0.03 num_epochs = 3 net = linreg loss = squared_loss for epoch in range(num_epochs): for X, y in data_iter(batch_size, features, labels): l = loss(net(X, w, b), y) l.sum().backward() sgd([w, b], lr, batch_size) with paddle.no_grad(): train_l = loss(net(features, w, b), labels)
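Both scratch implementations assign loss = squared_loss without defining it; it is assumed from earlier cells. A minimal sketch consistent with the per-example .sum() call in the training loops above:
def squared_loss(y_hat, y):
    """Halved squared error, returned per example so the caller can .sum() it."""
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2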
57
null
null
from mxnet import autograd, gluon, np, npx from d2l import mxnet as d2l npx.set_np() true_w = np.array([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) def load_array(data_arrays, batch_size, is_train=True): dataset = gluon.data.ArrayDataset(*data_arrays) return gluon.data.DataLoader(dataset, batch_size, shuffle=is_train) batch_size = 10 data_iter = load_array((features, labels), batch_size) from mxnet.gluon import nn net = nn.Sequential() net.add(nn.Dense(1)) from mxnet import init net.initialize(init.Normal(sigma=0.01)) loss = gluon.loss.L2Loss() from mxnet import gluon trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03}) w = net[0].weight.data() b = net[0].bias.data()
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import numpy as np import paddle true_w = paddle.to_tensor([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) def load_array(data_arrays, batch_size, is_train=True): dataset = paddle.io.TensorDataset(data_arrays) return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True) batch_size = 10 data_iter = load_array((features, labels), batch_size) from paddle import nn net = nn.Sequential(nn.Linear(2, 1)) weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01)) bias_attr = paddle.ParamAttr(initializer=None) net = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr)) loss = nn.MSELoss() trainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters()) w = net[0].weight b = net[0].bias
58
null
null
%matplotlib inline import sys from mxnet import gluon from d2l import mxnet as d2l d2l.use_svg_display() mnist_train = gluon.data.vision.FashionMNIST(train=True) mnist_test = gluon.data.vision.FashionMNIST(train=False) def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): figsize = (num_cols * scale, num_rows * scale) _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize) axes = axes.flatten() for i, (ax, img) in enumerate(zip(axes, imgs)): ax.imshow(img.asnumpy()) ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) if titles: ax.set_title(titles[i]) return axes X, y = mnist_train[:18] show_images(X.squeeze(axis=-1), 2, 9, titles=get_fashion_mnist_labels(y)); batch_size = 256 def get_dataloader_workers(): return 0 if sys.platform.startswith('win') else 4 transformer = gluon.data.vision.transforms.ToTensor() train_iter = gluon.data.DataLoader(mnist_train.transform_first(transformer), batch_size, shuffle=True, num_workers=get_dataloader_workers()) def load_data_fashion_mnist(batch_size, resize=None): dataset = gluon.data.vision trans = [dataset.transforms.ToTensor()] if resize: trans.insert(0, dataset.transforms.Resize(resize)) trans = dataset.transforms.Compose(trans) mnist_train = dataset.FashionMNIST(train=True).transform_first(trans) mnist_test = dataset.FashionMNIST(train=False).transform_first(trans) return (gluon.data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()), gluon.data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import sys import paddle from paddle.vision import transforms d2l.use_svg_display() trans = transforms.ToTensor() mnist_train = paddle.vision.datasets.FashionMNIST(mode="train", transform=trans) mnist_test = paddle.vision.datasets.FashionMNIST(mode="test", transform=trans) def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): figsize = (num_cols * scale, num_rows * scale) _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize) axes = axes.flatten() for i, (ax, img) in enumerate(zip(axes, imgs)): if paddle.is_tensor(img): ax.imshow(img.numpy()) else: ax.imshow(img) ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) if titles: ax.set_title(titles[i]) return axes X, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18))) show_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y)); batch_size = 256 def get_dataloader_workers(): return 4 train_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()) def load_data_fashion_mnist(batch_size, resize=None): trans = [transforms.ToTensor()] if resize: trans.insert(0, transforms.Resize(resize)) trans = transforms.Compose(trans) mnist_train = paddle.vision.datasets.FashionMNIST(mode="train", transform=trans) mnist_test = paddle.vision.datasets.FashionMNIST(mode="test", transform=trans) return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()), paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers()))
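Both Fashion-MNIST records pass titles=get_fashion_mnist_labels(y) without defining that function; it is assumed from the d2l utilities. A minimal sketch of the assumed helper:
def get_fashion_mnist_labels(labels):
    """Map numeric Fashion-MNIST class ids to human-readable text labels."""
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in labels]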
59
null
null
from IPython import display from mxnet import autograd, gluon, np, npx from d2l import mxnet as d2l npx.set_np() batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = np.random.normal(0, 0.01, (num_inputs, num_outputs)) b = np.zeros(num_outputs) W.attach_grad() b.attach_grad() X = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) X.sum(0, keepdims=True), X.sum(1, keepdims=True) def softmax(X): X_exp = np.exp(X) partition = X_exp.sum(1, keepdims=True) return X_exp / partition X = np.random.normal(0, 1, (2, 5)) X_prob = softmax(X) X_prob, X_prob.sum(1) def net(X): return softmax(np.dot(X.reshape((-1, W.shape[0])), W) + b) y = np.array([0, 2]) y_hat = np.array([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]]) y_hat[[0, 1], y] def cross_entropy(y_hat, y): return - np.log(y_hat[range(len(y_hat)), y]) cross_entropy(y_hat, y) def accuracy(y_hat, y): if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: y_hat = y_hat.argmax(axis=1) cmp = y_hat.astype(y.dtype) == y return float(cmp.astype(y.dtype).sum()) def evaluate_accuracy(net, data_iter): metric = Accumulator(2) for X, y in data_iter: metric.add(accuracy(net(X), y), d2l.size(y)) return metric[0] / metric[1] def train_epoch_ch3(net, train_iter, loss, updater): metric = Accumulator(3) if isinstance(updater, gluon.Trainer): updater = updater.step for X, y in train_iter: with autograd.record(): y_hat = net(X) l = loss(y_hat, y) l.backward() updater(X.shape[0]) metric.add(float(l.sum()), accuracy(y_hat, y), y.size) return metric[0] / metric[2], metric[1] / metric[2]
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from IPython import display batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs)) b = paddle.zeros(shape=(num_outputs,)) W.stop_gradient=False b.stop_gradient=False X = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) X.sum(0, keepdim=True), X.sum(1, keepdim=True) def softmax(X): X_exp = paddle.exp(X) partition = X_exp.sum(1, keepdim=True) return X_exp / partition X = paddle.normal(0, 1, (2, 5)) X_prob = softmax(X) X_prob, X_prob.sum(1) def net(X): return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b) y = paddle.to_tensor([0, 2]) y_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]]) y_hat[[0, 1], y] def cross_entropy(y_hat, y): return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()]) cross_entropy(y_hat, y) def accuracy(y_hat, y): if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: y_hat = y_hat.argmax(axis=1) if len(y_hat.shape) < len(y.shape): cmp = y_hat.astype(y.dtype) == y.squeeze() else: cmp = y_hat.astype(y.dtype) == y return float(cmp.astype(y.dtype).sum()) def evaluate_accuracy(net, data_iter): if isinstance(net, paddle.nn.Layer): net.eval() metric = Accumulator(2) with paddle.no_grad(): for X, y in data_iter: metric.add(accuracy(net(X), y), y.numel()) return metric[0] / metric[1] def train_epoch_ch3(net, train_iter, loss, updater): if isinstance(net, paddle.nn.Layer): net.train() metric = Accumulator(3) for X, y in train_iter: y_hat = net(X) l = loss(y_hat, y) if isinstance(updater, paddle.optimizer.Optimizer): updater.clear_grad() l.mean().backward() updater.step() else: l.sum().backward() updater(X.shape[0]) metric.add(float(l.sum()), accuracy(y_hat, y), y.numel()) return metric[0] / metric[2], metric[1] / metric[2]
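evaluate_accuracy and train_epoch_ch3 in both records accumulate metrics with an Accumulator class that is assumed from the d2l utilities. A minimal sketch of that helper:
class Accumulator:
    """Sum a fixed number of running metrics (assumed d2l-style helper)."""
    def __init__(self, n):
        self.data = [0.0] * n
    def add(self, *args):
        self.data = [a + float(b) for a, b in zip(self.data, args)]
    def reset(self):
        self.data = [0.0] * len(self.data)
    def __getitem__(self, idx):
        return self.data[idx]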
60
null
null
from mxnet import gluon, init, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = nn.Sequential() net.add(nn.Dense(10)) net.initialize(init.Normal(sigma=0.01)) loss = gluon.loss.SoftmaxCrossEntropyLoss() trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10)) def init_weights(m): if type(m) == nn.Linear: nn.initializer.Normal(m.weight, std=0.01) net.apply(init_weights); loss = nn.CrossEntropyLoss(reduction='none') trainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())
61
null
null
%matplotlib inline from mxnet import autograd, np, npx from d2l import mxnet as d2l npx.set_np() x = np.arange(-8.0, 8.0, 0.1) x.attach_grad() with autograd.record(): y = npx.relu(x) d2l.plot(x, y, 'x', 'relu(x)', figsize=(5, 2.5)) y.backward() d2l.plot(x, x.grad, 'x', 'grad of relu', figsize=(5, 2.5)) with autograd.record(): y = npx.sigmoid(x) d2l.plot(x, y, 'x', 'sigmoid(x)', figsize=(5, 2.5)) y.backward() d2l.plot(x, x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5)) with autograd.record(): y = np.tanh(x) d2l.plot(x, y, 'x', 'tanh(x)', figsize=(5, 2.5)) y.backward() d2l.plot(x, x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle x = paddle.arange(-8.0, 8.0, 0.1, dtype='float32') x.stop_gradient = False y = paddle.nn.functional.relu(x) d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5)) y.backward(paddle.ones_like(x), retain_graph=True) d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5)) y = paddle.nn.functional.sigmoid(x) d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5)) x.clear_gradient() y.backward(paddle.ones_like(x), retain_graph=True) d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5)) y = paddle.tanh(x) d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5)) x.clear_gradient() y.backward(paddle.ones_like(x), retain_graph=True) d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))
62
null
null
from mxnet import gluon, np, npx from d2l import mxnet as d2l npx.set_np() batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs, num_outputs, num_hiddens = 784, 10, 256 W1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens)) b1 = np.zeros(num_hiddens) W2 = np.random.normal(scale=0.01, size=(num_hiddens, num_outputs)) b2 = np.zeros(num_outputs) params = [W1, b1, W2, b2] for param in params: param.attach_grad() def relu(X): return np.maximum(X, 0) def net(X): X = X.reshape((-1, num_inputs)) H = relu(np.dot(X, W1) + b1) return np.dot(H, W2) + b2 loss = gluon.loss.SoftmaxCrossEntropyLoss() num_epochs, lr = 10, 0.1 d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs, num_outputs, num_hiddens = 784, 10, 256 W1 = paddle.randn([num_inputs, num_hiddens]) * 0.01 W1.stop_gradient = False b1 = paddle.zeros([num_hiddens]) b1.stop_gradient = False W2 = paddle.randn([num_hiddens, num_outputs]) * 0.01 W2.stop_gradient = False b2 = paddle.zeros([num_outputs]) b2.stop_gradient = False params = [W1, b1, W2, b2] def relu(X): a = paddle.zeros_like(X) return paddle.maximum(X, a) def net(X): X = X.reshape((-1, num_inputs)) H = relu(X@W1 + b1) return (H@W2 + b2) loss = nn.CrossEntropyLoss(reduction='none') num_epochs, lr = 10, 0.1 updater = paddle.optimizer.SGD(learning_rate=lr, parameters=params) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
63
null
null
from mxnet import gluon, init, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() net = nn.Sequential() net.add(nn.Dense(256, activation='relu'), nn.Dense(10)) net.initialize(init.Normal(sigma=0.01)) batch_size, lr, num_epochs = 256, 0.1, 10 loss = gluon.loss.SoftmaxCrossEntropyLoss() trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr}) train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10)) for layer in net: if type(layer) == nn.Linear: weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.01)) layer.weight_attr = weight_attr batch_size, lr, num_epochs = 256, 0.1, 10 loss = nn.CrossEntropyLoss(reduction='none') trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr) train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
64
null
null
import math from mxnet import gluon, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() features[:2], poly_features[:2, :], labels[:2] def evaluate_loss(net, data_iter, loss): metric = d2l.Accumulator(2) for X, y in data_iter: l = loss(net(X), y) metric.add(l.sum(), d2l.size(l)) return metric[0] / metric[1] def train(train_features, test_features, train_labels, test_labels, num_epochs=400): loss = gluon.loss.L2Loss() net = nn.Sequential() net.add(nn.Dense(1, use_bias=False)) net.initialize() batch_size = min(10, train_labels.shape[0]) train_iter = d2l.load_array((train_features, train_labels), batch_size) test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False) trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01}) animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test']) for epoch in range(num_epochs): d2l.train_epoch_ch3(net, train_iter, loss, trainer) if epoch == 0 or (epoch + 1) % 20 == 0: animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss))) train(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:]) train(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import math import numpy as np import paddle from paddle import nn true_w, features, poly_features, labels = [paddle.to_tensor(x, dtype= paddle.float32) for x in [true_w, features, poly_features, labels]] features[:2], poly_features[:2, :], labels[:2] def evaluate_loss(net, data_iter, loss): metric = d2l.Accumulator(2) for X, y in data_iter: out = net(X) y = y.reshape(out.shape) l = loss(out, y) metric.add(l.sum(), l.numel()) return metric[0] / metric[1] def train(train_features, test_features, train_labels, test_labels, num_epochs=400): loss = nn.MSELoss() input_shape = train_features.shape[-1] net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False)) batch_size = min(10, train_labels.shape[0]) train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size) test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False) trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01) animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test']) for epoch in range(num_epochs): d2l.train_epoch_ch3(net, train_iter, loss, trainer) if epoch == 0 or (epoch + 1) % 20 == 0: animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss))) train(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:]) train(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500)
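Both polynomial-regression records assume that true_w, features, poly_features, labels, and n_train were produced by earlier notebook cells. A hedged NumPy sketch of that setup; the degree, split sizes, and weight values follow the usual d2l example and are illustrative assumptions:
import math
import numpy as np

max_degree = 20                 # assumed degree of the polynomial basis
n_train, n_test = 100, 100      # assumed split sizes
true_w = np.zeros(max_degree)
true_w[0:4] = np.array([5, 1.2, -3.4, 5.6])  # assumed ground-truth weights

features = np.random.normal(size=(n_train + n_test, 1))
np.random.shuffle(features)
poly_features = np.power(features, np.arange(max_degree).reshape(1, -1))
for i in range(max_degree):
    poly_features[:, i] /= math.gamma(i + 1)  # rescale x^i by i!
labels = np.dot(poly_features, true_w)
labels += np.random.normal(scale=0.1, size=labels.shape)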
65
null
null
%matplotlib inline from mxnet import autograd, gluon, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5 true_w, true_b = np.ones((num_inputs, 1)) * 0.01, 0.05 train_data = d2l.synthetic_data(true_w, true_b, n_train) train_iter = d2l.load_array(train_data, batch_size) test_data = d2l.synthetic_data(true_w, true_b, n_test) test_iter = d2l.load_array(test_data, batch_size, is_train=False) def init_params(): w = np.random.normal(scale=1, size=(num_inputs, 1)) b = np.zeros(1) w.attach_grad() b.attach_grad() return [w, b] def l2_penalty(w): return (w**2).sum() / 2 def train(lambd): w, b = init_params() net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss num_epochs, lr = 100, 0.003 animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: with autograd.record(): l = loss(net(X), y) + lambd * l2_penalty(w) l.backward() d2l.sgd([w, b], lr, batch_size) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) def train_concise(wd): net = nn.Sequential() net.add(nn.Dense(1)) net.initialize(init.Normal(sigma=1)) loss = gluon.loss.L2Loss() num_epochs, lr = 100, 0.003 trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr, 'wd': wd}) net.collect_params('.*bias').setattr('wd_mult', 0) animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: with autograd.record(): l = loss(net(X), y) l.backward() trainer.step(batch_size) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5 true_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05 train_data = d2l.synthetic_data(true_w, true_b, n_train) train_iter = d2l.load_array(train_data, batch_size) test_data = d2l.synthetic_data(true_w, true_b, n_test) test_iter = d2l.load_array(test_data, batch_size, is_train=False) def init_params(): w = paddle.normal(0, 1, shape=(num_inputs, 1)) w.stop_gradient = False b = paddle.zeros(shape=[1]) b.stop_gradient = False return [w, b] def l2_penalty(w): return paddle.sum(w.pow(2)) / 2 def train(lambd): w, b = init_params() net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss num_epochs, lr = 100, 0.003 animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: l = loss(net(X), y) + lambd * l2_penalty(w) l.sum().backward() d2l.sgd([w, b], lr, batch_size) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) def train_concise(wd): weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0)) bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0)) net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr)) loss = nn.MSELoss() num_epochs, lr = 100, 0.003 trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0) animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: l = loss(net(X), y) l.backward() trainer.step() trainer.clear_grad() if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))
66
null
null
from mxnet import autograd, gluon, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def dropout_layer(X, dropout): assert 0 <= dropout <= 1 if dropout == 1: return np.zeros_like(X) if dropout == 0: return X mask = np.random.uniform(0, 1, X.shape) > dropout return mask.astype(np.float32) * X / (1.0 - dropout) X = np.arange(16).reshape(2, 8) num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256 W1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens1)) b1 = np.zeros(num_hiddens1) W2 = np.random.normal(scale=0.01, size=(num_hiddens1, num_hiddens2)) b2 = np.zeros(num_hiddens2) W3 = np.random.normal(scale=0.01, size=(num_hiddens2, num_outputs)) b3 = np.zeros(num_outputs) params = [W1, b1, W2, b2, W3, b3] for param in params: param.attach_grad() dropout1, dropout2 = 0.2, 0.5 def net(X): X = X.reshape(-1, num_inputs) H1 = npx.relu(np.dot(X, W1) + b1) if autograd.is_training(): H1 = dropout_layer(H1, dropout1) H2 = npx.relu(np.dot(H1, W2) + b2) if autograd.is_training(): H2 = dropout_layer(H2, dropout2) return np.dot(H2, W3) + b3 num_epochs, lr, batch_size = 10, 0.5, 256 loss = gluon.loss.SoftmaxCrossEntropyLoss() train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size)) net = nn.Sequential() net.add(nn.Dense(256, activation="relu"), nn.Dropout(dropout1), nn.Dense(256, activation="relu"), nn.Dropout(dropout2), nn.Dense(10)) net.initialize(init.Normal(sigma=0.01)) trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr}) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
import warnings warnings.filterwarnings(action="ignore") import random import paddle from paddle import nn warnings.filterwarnings("ignore", category=DeprecationWarning) from d2l import paddle as d2l def dropout_layer(X, dropout): assert 0 <= dropout <= 1 if dropout == 1: return paddle.zeros_like(X) if dropout == 0: return X mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32') return mask * X / (1.0 - dropout) X= paddle.arange(16, dtype = paddle.float32).reshape((2, 8)) num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256 dropout1, dropout2 = 0.2, 0.5 class Net(nn.Layer): def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2, is_training = True): super(Net, self).__init__() self.num_inputs = num_inputs self.training = is_training self.lin1 = nn.Linear(num_inputs, num_hiddens1) self.lin2 = nn.Linear(num_hiddens1, num_hiddens2) self.lin3 = nn.Linear(num_hiddens2, num_outputs) self.relu = nn.ReLU() def forward(self, X): H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs)))) if self.training == True: H1 = dropout_layer(H1, dropout1) H2 = self.relu(self.lin2(H1)) if self.training == True: H2 = dropout_layer(H2, dropout2) out = self.lin3(H2) return out net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2) num_epochs, lr, batch_size = 10, 0.5, 256 loss = nn.CrossEntropyLoss(reduction='none') train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) trainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters()) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01)) net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256, weight_attr=weight_attr), nn.ReLU(), nn.Dropout(dropout1), nn.Linear(256, 256, weight_attr=weight_attr), nn.ReLU(), nn.Dropout(dropout2), nn.Linear(256, 10, weight_attr=weight_attr)) trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters()) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
67
null
null
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr}) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) %matplotlib inline from mxnet import autograd, np, npx from d2l import mxnet as d2l npx.set_np() x = np.arange(-8.0, 8.0, 0.1) x.attach_grad() with autograd.record(): y = npx.sigmoid(x) y.backward() d2l.plot(x, [y, x.grad], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5)) M = np.random.normal(size=(4, 4)) for i in range(100): M = np.dot(M, np.random.normal(size=(4, 4)))
trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters()) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) %matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle x = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32') x.stop_gradient = False y = paddle.nn.functional.sigmoid(x) y.backward(paddle.ones_like(x)) d2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5)) M = paddle.normal(0, 1, shape=(4,4)) for i in range(100): M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4)))
68
null
null
%matplotlib inline import pandas as pd from mxnet import autograd, gluon, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() n_train = train_data.shape[0] train_features = np.array(all_features[:n_train].values, dtype=np.float32) test_features = np.array(all_features[n_train:].values, dtype=np.float32) train_labels = np.array(train_data.SalePrice.values.reshape(-1, 1), dtype=np.float32) loss = gluon.loss.L2Loss() def get_net(): net = nn.Sequential() net.add(nn.Dense(1)) net.initialize() return net def log_rmse(net, features, labels): clipped_preds = np.clip(net(features), 1, float('inf')) return np.sqrt(2 * loss(np.log(clipped_preds), np.log(labels)).mean()) def train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size): train_ls, test_ls = [], [] train_iter = d2l.load_array((train_features, train_labels), batch_size) trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': learning_rate, 'wd': weight_decay}) for epoch in range(num_epochs): for X, y in train_iter: with autograd.record(): l = loss(net(X), y) l.backward() trainer.step(batch_size) train_ls.append(log_rmse(net, train_features, train_labels)) if test_labels is not None: test_ls.append(log_rmse(net, test_features, test_labels)) return train_ls, test_ls def get_k_fold_data(k, i, X, y): assert k > 1 fold_size = X.shape[0] // k X_train, y_train = None, None for j in range(k): idx = slice(j * fold_size, (j + 1) * fold_size) X_part, y_part = X[idx, :], y[idx] if j == i: X_valid, y_valid = X_part, y_part elif X_train is None: X_train, y_train = X_part, y_part else: X_train = np.concatenate([X_train, X_part], 0) y_train = np.concatenate([y_train, y_part], 0) return X_train, y_train, X_valid, y_valid def train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size): net = get_net() train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size) d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log') preds = net(test_features).asnumpy() test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0]) submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1) submission.to_csv('submission.csv', index=False)
%matplotlib inline import warnings import numpy as np import pandas as pd warnings.filterwarnings(action="ignore") import paddle from paddle import nn warnings.filterwarnings("ignore", category=DeprecationWarning) from d2l import paddle as d2l n_train = train_data.shape[0] train_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32) test_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32) train_labels = paddle.to_tensor( train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32) loss = nn.MSELoss() in_features = train_features.shape[1] def get_net(): net = nn.Sequential(nn.Linear(in_features,1)) return net def log_rmse(net, features, labels): clipped_preds = paddle.clip(net(features), 1, float('inf')) rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels))) return rmse.item() def train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size): train_ls, test_ls = [], [] train_iter = d2l.load_array((train_features, train_labels), batch_size) optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0) for epoch in range(num_epochs): for X, y in train_iter: l = loss(net(X), y) l.backward() optimizer.step() optimizer.clear_grad() train_ls.append(log_rmse(net, train_features, train_labels)) if test_labels is not None: test_ls.append(log_rmse(net, test_features, test_labels)) return train_ls, test_ls def get_k_fold_data(k, i, X, y): assert k > 1 fold_size = X.shape[0] // k X_train, y_train = None, None for j in range(k): idx = slice(j * fold_size, (j + 1) * fold_size) X_part, y_part = X[idx, :], y[idx] if j == i: X_valid, y_valid = X_part, y_part elif X_train is None: X_train, y_train = X_part, y_part else: X_train = paddle.concat([X_train, X_part], 0) y_train = paddle.concat([y_train, y_part], 0) return X_train, y_train, X_valid, y_valid def train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size): net = get_net() train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size) d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log') preds = net(test_features).detach().numpy() test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0]) submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1) submission.to_csv('submission.csv', index=False)
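get_k_fold_data above is only half of the cross-validation recipe; the driver that averages over folds is assumed from the surrounding notebook. A sketch of such a k_fold function, built only from the helpers defined in this record (its name and return values are assumptions):
def k_fold(k, X_train, y_train, num_epochs, lr, weight_decay, batch_size):
    """Average final train/validation log-RMSE over k folds using the helpers above."""
    train_l_sum, valid_l_sum = 0.0, 0.0
    for i in range(k):
        data = get_k_fold_data(k, i, X_train, y_train)  # (X_tr, y_tr, X_val, y_val)
        net = get_net()
        train_ls, valid_ls = train(net, *data, num_epochs, lr,
                                   weight_decay, batch_size)
        train_l_sum += train_ls[-1]
        valid_l_sum += valid_ls[-1]
    return train_l_sum / k, valid_l_sum / k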
69
null
null
from mxnet import np, npx from mxnet.gluon import nn npx.set_np() net = nn.Sequential() net.add(nn.Dense(256, activation='relu')) net.add(nn.Dense(10)) net.initialize() X = np.random.uniform(size=(2, 20)) net(X) class MLP(nn.Block): def __init__(self, **kwargs): super().__init__(**kwargs) self.hidden = nn.Dense(256, activation='relu') self.out = nn.Dense(10) def forward(self, X): return self.out(self.hidden(X)) net = MLP() net.initialize() net(X) class MySequential(nn.Block): def add(self, block): self._children[block.name] = block def forward(self, X): for block in self._children.values(): X = block(X) return X net = MySequential() net.add(nn.Dense(256, activation='relu')) net.add(nn.Dense(10)) net.initialize() net(X) class FixedHiddenMLP(nn.Block): def __init__(self, **kwargs): super().__init__(**kwargs) self.rand_weight = self.params.get_constant('rand_weight', np.random.uniform(size=(20, 20))) self.dense = nn.Dense(20, activation='relu') def forward(self, X): X = self.dense(X) X = npx.relu(np.dot(X, self.rand_weight.data()) + 1) X = self.dense(X) while np.abs(X).sum() > 1: X /= 2 return X.sum() net = FixedHiddenMLP() net.initialize() net(X) class NestMLP(nn.Block): def __init__(self, **kwargs): super().__init__(**kwargs) self.net = nn.Sequential() self.net.add(nn.Dense(64, activation='relu'), nn.Dense(32, activation='relu')) self.dense = nn.Dense(16, activation='relu') def forward(self, X): return self.dense(self.net(X)) chimera = nn.Sequential() chimera.add(NestMLP(), nn.Dense(20), FixedHiddenMLP()) chimera.initialize() chimera(X)
import warnings warnings.filterwarnings(action="ignore") import paddle from paddle import nn from paddle.nn import functional as F net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10)) X = paddle.rand([2, 20]) net(X) class MLP(nn.Layer): def __init__(self): super().__init__() self.hidden = nn.Linear(20, 256) self.out = nn.Linear(256, 10) def forward(self, X): return self.out(F.relu(self.hidden(X))) net = MLP() net(X) class MySequential(nn.Layer): def __init__(self, *layers): super(MySequential, self).__init__() if len(layers) > 0 and isinstance(layers[0], tuple): for name, layer in layers: self.add_sublayer(name, layer) else: for idx, layer in enumerate(layers): self.add_sublayer(str(idx), layer) def forward(self, X): for layer in self._sub_layers.values(): X = layer(X) return X net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10)) net(X) class FixedHiddenMLP(nn.Layer): def __init__(self): super().__init__() self.rand_weight = paddle.rand([20, 20]) self.linear = nn.Linear(20, 20) def forward(self, X): X = self.linear(X) X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1) X = self.linear(X) while X.abs().sum() > 1: X /= 2 return X.sum() net = FixedHiddenMLP() net(X) class NestMLP(nn.Layer): def __init__(self): super().__init__() self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU()) self.linear = nn.Linear(32, 16) def forward(self, X): return self.linear(self.net(X)) chimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP()) chimera(X)
70
null
null
from mxnet import init, np, npx from mxnet.gluon import nn npx.set_np() net = nn.Sequential() net.add(nn.Dense(8, activation='relu')) net.add(nn.Dense(1)) net.initialize() X = np.random.uniform(size=(2, 4)) net(X) net.collect_params()['dense1_bias'].data() def block1(): net = nn.Sequential() net.add(nn.Dense(32, activation='relu')) net.add(nn.Dense(16, activation='relu')) return net def block2(): net = nn.Sequential() for _ in range(4): net.add(block1()) return net rgnet = nn.Sequential() rgnet.add(block2()) rgnet.add(nn.Dense(10)) rgnet.initialize() rgnet(X) net.initialize(init=init.Normal(sigma=0.01), force_reinit=True) net[0].weight.data()[0] net.initialize(init=init.Constant(1), force_reinit=True) net[0].weight.data()[0] net[0].weight.initialize(init=init.Xavier(), force_reinit=True) net[1].initialize(init=init.Constant(42), force_reinit=True) class MyInit(init.Initializer): def _init_weight(self, name, data): data[:] = np.random.uniform(-10, 10, data.shape) data *= np.abs(data) >= 5 net.initialize(MyInit(), force_reinit=True) net[0].weight.data()[:2] net[0].weight.data()[:] += 1 net[0].weight.data()[0, 0] = 42 net[0].weight.data()[0] layer = CenteredLayer() layer(np.array([1, 2, 3, 4, 5])) net = nn.Sequential() net.add(nn.Dense(128), CenteredLayer()) net.initialize()
import warnings warnings.filterwarnings(action="ignore") import paddle from paddle import nn net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1)) X = paddle.rand([2, 4]) net(X) net.state_dict()['2.bias'] def block1(): return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU()) def block2(): net = nn.Sequential() for i in range(4): net.add_sublayer(f'block {i}', block1()) return net rgnet = nn.Sequential(block2(), nn.Linear(4, 1)) rgnet(X) def init_normal(m): if type(m) == nn.Linear: paddle.nn.initializer.Normal(mean=0.0, std=0.01) paddle.zeros(m.bias) net.apply(init_normal) net[0].weight[0],net[0].state_dict()['bias'] def init_constant(m): if type(m) == nn.Linear: paddle.nn.initializer.Constant(value = 1) paddle.zeros(m.bias) net.apply(init_constant) net[0].weight[0],net[0].state_dict()['bias'] def xavier(m): if type(m) == nn.Linear: paddle.nn.initializer.XavierUniform(m.weight) def init_42(m): if type(m) == nn.Linear: paddle.nn.initializer.Constant(42) net[0].apply(xavier) net[2].apply(init_42) def my_init(m): if type(m) == nn.Linear: print("Init", *[(name, param.shape) for name, param in m.named_parameters()][0]) paddle.nn.initializer.XavierUniform(m.weight, -10, 10) h = paddle.abs(m.weight) >= 5 h = paddle.to_tensor(h) m = paddle.to_tensor(m.weight) m *= h net.apply(my_init) net[0].weight[:2] net[0].weight.set_value(net[0].weight.numpy() + 1) val = net[0].weight.numpy() val[0, 0] = 42 net[0].weight.set_value(val) net[0].weight[0] layer = CenteredLayer() layer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32')) net = nn.Sequential(nn.Linear(8, 128), CenteredLayer())
71
from mxnet import np, npx from mxnet.gluon import nn npx.set_np() class CenteredLayer(nn.Block): def __init__(self, **kwargs): super().__init__(**kwargs) def forward(self, X): return X - X.mean() Y = net(np.random.uniform(size=(4, 8))) Y.mean() class MyDense(nn.Block): def __init__(self, units, in_units, **kwargs): super().__init__(**kwargs) self.weight = self.params.get('weight', shape=(in_units, units)) self.bias = self.params.get('bias', shape=(units,)) def forward(self, x): linear = np.dot(x, self.weight.data(ctx=x.ctx)) + self.bias.data( ctx=x.ctx) return npx.relu(linear) dense = MyDense(units=3, in_units=5) dense.params dense.initialize() dense(np.random.uniform(size=(2, 5))) net = nn.Sequential() net.add(MyDense(8, in_units=64), MyDense(1, in_units=8)) net.initialize() net(np.random.uniform(size=(2, 64)))
import warnings warnings.filterwarnings(action="ignore") import paddle import paddle.nn.functional as F from paddle import nn class CenteredLayer(nn.Layer): def __init__(self): super().__init__() def forward(self, X): return X - X.mean() Y = net(paddle.rand([4, 8])) Y.mean() class MyLinear(nn.Layer): def __init__(self, in_units, units): super().__init__() self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32') self.bias = paddle.create_parameter(shape=(units,), dtype='float32') def forward(self, X): linear = paddle.matmul(X, self.weight) + self.bias return F.relu(linear) linear = MyLinear(5, 3) linear.weight linear(paddle.randn([2, 5])) net = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1)) net(paddle.rand([2, 64]))
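A quick sanity check for the CenteredLayer defined above (an added illustration, not part of the original pair): whatever the input, the centered output should have a mean of roughly zero.
layer = CenteredLayer()
out = layer(paddle.rand([4, 8]))
print(float(out.mean()))  # ~0.0 up to floating-point error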
72
from mxnet import np, npx from mxnet.gluon import nn npx.set_np() x = np.arange(4) npx.save('x-file', x) x2 = npx.load('x-file') y = np.zeros(4) npx.save('x-files', [x, y]) x2, y2 = npx.load('x-files') mydict = {'x': x, 'y': y} npx.save('mydict', mydict) mydict2 = npx.load('mydict') class MLP(nn.Block): def __init__(self, **kwargs): super(MLP, self).__init__(**kwargs) self.hidden = nn.Dense(256, activation='relu') self.output = nn.Dense(10) def forward(self, x): return self.output(self.hidden(x)) net = MLP() net.initialize() X = np.random.uniform(size=(2, 20)) Y = net(X) net.save_parameters('mlp.params') clone = MLP() clone.load_parameters('mlp.params')
import warnings warnings.filterwarnings(action="ignore") import paddle from paddle import nn from paddle.nn import functional as F x = paddle.arange(4) paddle.save(x, 'x-file') x2 = paddle.load('x-file') y = paddle.zeros([4]) paddle.save([x,y], 'x-file') x2, y2 = paddle.load('x-file') mydict = {'x': x, 'y': y} paddle.save(mydict, 'mydict') mydict2 = paddle.load('mydict') class MLP(nn.Layer): def __init__(self): super().__init__() self.hidden = nn.Linear(20, 256) self.output = nn.Linear(256, 10) def forward(self, x): return self.output(F.relu(self.hidden(x))) net = MLP() X = paddle.randn(shape=[2, 20]) Y = net(X) paddle.save(net.state_dict(), 'mlp.pdparams') clone = MLP() clone.set_state_dict(paddle.load('mlp.pdparams')) clone.eval()
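A minimal follow-up check (added here; it assumes net, X, Y and clone from the block above): because clone loaded the saved state dict, it should reproduce the original network's outputs exactly.
Y_clone = clone(X)
print((Y_clone == Y).all())  # True when the parameters were restored correctly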
73
from mxnet import np, npx from mxnet.gluon import nn npx.set_np() npx.cpu(), npx.gpu(), npx.gpu(1) npx.num_gpus() def try_gpu(i=0): return npx.gpu(i) if npx.num_gpus() >= i + 1 else npx.cpu() def try_all_gpus(): devices = [npx.gpu(i) for i in range(npx.num_gpus())] return devices if devices else [npx.cpu()] try_gpu(), try_gpu(10), try_all_gpus() x = np.array([1, 2, 3]) x.ctx X = np.ones((2, 3), ctx=try_gpu()) Y = np.random.uniform(size=(2, 3), ctx=try_gpu(1)) Z = X.copyto(try_gpu(1)) Z.as_in_ctx(try_gpu(1)) is Z net = nn.Sequential() net.add(nn.Dense(1)) net.initialize(ctx=try_gpu()) net[0].weight.data().ctx
import paddle from paddle import nn paddle.device.set_device("cpu"), paddle.CUDAPlace(0), paddle.CUDAPlace(1) paddle.device.cuda.device_count() def try_gpu(i=0): if paddle.device.cuda.device_count() >= i + 1: return paddle.CUDAPlace(i) return paddle.CPUPlace() def try_all_gpus(): devices = [paddle.CUDAPlace(i) for i in range(paddle.device.cuda.device_count())] return devices if devices else paddle.CPUPlace() try_gpu(),try_gpu(10),try_all_gpus() x = paddle.to_tensor([1, 2, 3]) x.place X = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu()) Y = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1)) Z = X.cuda(1) Z.cuda(1) is Z net = nn.Sequential(nn.Linear(3, 1)) net=net.to(try_gpu()) net[0].weight.place
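A short usage sketch for the device helpers above (illustrative only; it assumes try_gpu is defined as in that block): place a tensor on whatever device is available and inspect its placement.
device = try_gpu()  # CUDAPlace(0) if a GPU is present, otherwise CPUPlace
a = paddle.to_tensor([1.0, 2.0, 3.0], place=device)
print(a.place)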
74
from mxnet import autograd, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def corr2d(X, K): h, w = K.shape Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): Y[i, j] = (X[i:i + h, j:j + w] * K).sum() return Y X = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) K = np.array([[0.0, 1.0], [2.0, 3.0]]) corr2d(X, K) class Conv2D(nn.Block): def __init__(self, kernel_size, **kwargs): super().__init__(**kwargs) self.weight = self.params.get('weight', shape=kernel_size) self.bias = self.params.get('bias', shape=(1,)) def forward(self, x): return corr2d(x, self.weight.data()) + self.bias.data() X = np.ones((6, 8)) X[:, 2:6] = 0 K = np.array([[1.0, -1.0]]) corr2d(d2l.transpose(X), K) conv2d = nn.Conv2D(1, kernel_size=(1, 2), use_bias=False) conv2d.initialize() X = X.reshape(1, 1, 6, 8) Y = Y.reshape(1, 1, 6, 7) lr = 3e-2 for i in range(10): with autograd.record(): Y_hat = conv2d(X) l = (Y_hat - Y) ** 2 l.backward() conv2d.weight.data()[:] -= lr * conv2d.weight.grad() conv2d.weight.data().reshape((1, 2))
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn def corr2d(X, K): h, w = K.shape Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): Y[i, j] = (X[i:i + h, j:j + w] * K).sum() return Y X = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) K = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]]) corr2d(X, K) class Conv2D(nn.Layer): def __init__(self, kernel_size): super().__init__() self.weight = paddle.ParamAttr(paddle.rand(kernel_size)) self.bias = paddle.ParamAttr(paddle.zeros(1)) def forward(self, x): return corr2d(x, self.weight) + self.bias X = paddle.ones((6, 8)) X[:, 2:6] = 0 K = paddle.to_tensor([[1.0, -1.0]]) Y = corr2d(X, K) corr2d(X.t(), K) conv2d = nn.Conv2D(1, 1, kernel_size=(1, 2)) X = X.reshape((1, 1, 6, 8)) Y = Y.reshape((1, 1, 6, 7)) lr = 3e-2 for i in range(10): Y_hat = conv2d(X) l = (Y_hat - Y) ** 2 conv2d.clear_gradients() l.sum().backward() with paddle.no_grad(): conv2d.weight[:] -= lr * conv2d.weight.grad conv2d.weight.reshape((1, 2))
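An illustrative check of the corr2d edge detector above (new code with fresh variable names so the training loop's tensors are left untouched): a 6x8 input and a 1x2 kernel give a (6-1+1) x (8-2+1) = 6x7 output whose nonzero entries mark the vertical edges.
X_demo = paddle.ones((6, 8))
X_demo[:, 2:6] = 0
K_demo = paddle.to_tensor([[1.0, -1.0]])
Y_demo = corr2d(X_demo, K_demo)
print(Y_demo.shape)  # [6, 7]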
75
from mxnet import np, npx from mxnet.gluon import nn npx.set_np() def comp_conv2d(conv2d, X): conv2d.initialize() X = X.reshape((1, 1) + X.shape) Y = conv2d(X) return Y.reshape(Y.shape[2:]) conv2d = nn.Conv2D(1, kernel_size=3, padding=1) X = np.random.uniform(size=(8, 8)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(1, kernel_size=(5, 3), padding=(2, 1)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(1, kernel_size=3, padding=1, strides=2) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(1, kernel_size=(3, 5), padding=(0, 1), strides=(3, 4)) comp_conv2d(conv2d, X).shape
import warnings warnings.filterwarnings(action="ignore") import paddle from paddle import nn def comp_conv2d(conv2d, X): X = paddle.reshape(X, [1, 1] + X.shape) Y = conv2d(X) return Y.reshape(Y.shape[2:]) conv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1) X = paddle.rand((8, 8)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, stride=2) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4)) comp_conv2d(conv2d, X).shape
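The shapes printed above follow the usual convolution output formula; the helper below (an added illustration, not from the source) makes it explicit: out = floor((n - k + 2*p + s) / s) per spatial dimension.
def conv_out(n, k, p, s):
    # p is the padding added on each side, so the total padding is 2 * p
    return (n - k + 2 * p + s) // s
print(conv_out(8, 3, 1, 1))  # 8: kernel 3, padding 1, stride 1
print(conv_out(8, 3, 1, 2))  # 4: kernel 3, padding 1, stride 2
print(conv_out(8, 3, 0, 3), conv_out(8, 5, 1, 4))  # 2 2: the (3, 5) kernel example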
76
from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() def corr2d_multi_in(X, K): return sum(d2l.corr2d(x, k) for x, k in zip(X, K)) X = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]) K = np.array([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]]) corr2d_multi_in(X, K) def corr2d_multi_in_out(X, K): return np.stack([corr2d_multi_in(X, k) for k in K], 0) K = np.stack((K, K + 1, K + 2), 0) K.shape def corr2d_multi_in_out_1x1(X, K): c_i, h, w = X.shape c_o = K.shape[0] X = X.reshape((c_i, h * w)) K = K.reshape((c_o, c_i)) Y = np.dot(K, X) return Y.reshape((c_o, h, w)) X = np.random.normal(0, 1, (3, 3, 3)) K = np.random.normal(0, 1, (2, 3, 1, 1)) Y1 = corr2d_multi_in_out_1x1(X, K) Y2 = corr2d_multi_in_out(X, K) assert float(np.abs(Y1 - Y2).sum()) < 1e-6
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle def corr2d_multi_in(X, K): return sum(d2l.corr2d(x, k) for x, k in zip(X, K)) X = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]) K = paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]]) corr2d_multi_in(X, K) def corr2d_multi_in_out(X, K): return paddle.stack([corr2d_multi_in(X, k) for k in K], 0) K = paddle.stack((K, K + 1, K + 2), 0) K.shape def corr2d_multi_in_out_1x1(X, K): c_i, h, w = X.shape c_o = K.shape[0] X = X.reshape((c_i, h * w)) K = K.reshape((c_o, c_i)) Y = paddle.matmul(K, X) return Y.reshape((c_o, h, w)) X = paddle.normal(0, 1, (3, 3, 3)) K = paddle.normal(0, 1, (2, 3, 1, 1)) Y1 = corr2d_multi_in_out_1x1(X, K) Y2 = corr2d_multi_in_out(X, K) assert float(paddle.abs(Y1 - Y2).sum()) < 1e-6
77
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def pool2d(X, pool_size, mode='max'): p_h, p_w = pool_size Y = np.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): if mode == 'max': Y[i, j] = X[i: i + p_h, j: j + p_w].max() elif mode == 'avg': Y[i, j] = X[i: i + p_h, j: j + p_w].mean() return Y X = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) pool2d(X, (2, 2)) X = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4)) pool2d = nn.MaxPool2D(3) pool2d(X) pool2d = nn.MaxPool2D(3, padding=1, strides=2) pool2d(X) pool2d = nn.MaxPool2D((2, 3), padding=(0, 1), strides=(2, 3)) pool2d(X) X = np.concatenate((X, X + 1), 1) pool2d = nn.MaxPool2D(3, padding=1, strides=2) pool2d(X)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn def pool2d(X, pool_size, mode='max'): p_h, p_w = pool_size Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): if mode == 'max': Y[i, j] = X[i: i + p_h, j: j + p_w].max() elif mode == 'avg': Y[i, j] = X[i: i + p_h, j: j + p_w].mean() return Y X = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) pool2d(X, (2, 2)) X = paddle.arange(16, dtype="float32").reshape((1, 1, 4, 4)) pool2d = nn.MaxPool2D(3, stride=3) pool2d(X) pool2d = nn.MaxPool2D(3, padding=1, stride=2) pool2d(X) pool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3)) pool2d(X) X = paddle.concat((X, X + 1), 1) pool2d = paddle.nn.MaxPool2D(3, padding=1, stride=2) pool2d(X)
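One more worked call of the pool2d function above (added for illustration): on the same 3x3 input, average pooling over 2x2 windows yields [[2, 3], [5, 6]], versus the max-pooled [[4, 5], [7, 8]].
X_pool = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
print(pool2d(X_pool, (2, 2), 'avg'))  # [[2., 3.], [5., 6.]]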
78
from mxnet import autograd, gluon, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() net = nn.Sequential() net.add(nn.Conv2D(channels=6, kernel_size=5, padding=2, activation='sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Dense(120, activation='sigmoid'), nn.Dense(84, activation='sigmoid'), nn.Dense(10)) X = np.random.uniform(size=(1, 1, 28, 28)) net.initialize() for layer in net: X = layer(X) print(layer.name, 'output shape: ', X.shape) def train_ch6(net, train_iter, test_iter, num_epochs, lr, device): net.initialize(force_reinit=True, ctx=device, init=init.Xavier()) loss = gluon.loss.SoftmaxCrossEntropyLoss() trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr}) animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc']) timer, num_batches = d2l.Timer(), len(train_iter) for epoch in range(num_epochs): metric = d2l.Accumulator(3) for i, (X, y) in enumerate(train_iter): timer.start() X, y = X.as_in_ctx(device), y.as_in_ctx(device) with autograd.record(): y_hat = net(X) l = loss(y_hat, y) l.backward() trainer.step(X.shape[0]) metric.add(l.sum(), d2l.accuracy(y_hat, y), X.shape[0]) timer.stop() train_l = metric[0] / metric[2] train_acc = metric[1] / metric[2] if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1: animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None)) test_acc = evaluate_accuracy_gpu(net, test_iter) animator.add(epoch + 1, (None, None, test_acc))
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn, optimizer net = nn.Sequential( nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(), nn.AvgPool2D(kernel_size=2, stride=2), nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(), nn.AvgPool2D(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(), nn.Linear(120, 84), nn.Sigmoid(), nn.Linear(84, 10)) X = paddle.rand((1, 1, 28, 28), 'float32') for layer in net: X = layer(X) print(layer.__class__.__name__, 'output shape: ', X.shape) def train_ch6(net, train_iter, test_iter, num_epochs, lr, device): def init_weights(m): if type(m) == nn.Linear or type(m) == nn.Conv2D: nn.initializer.XavierUniform(m.weight) net.apply(init_weights) net.to(device) optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters()) loss = nn.CrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc']) timer, num_batches = d2l.Timer(), len(train_iter) for epoch in range(num_epochs): metric = d2l.Accumulator(3) net.train() for i, (X, y) in enumerate(train_iter): timer.start() optimizer.clear_grad() X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device) y_hat = net(X) l = loss(y_hat, y) l.backward() optimizer.step() with paddle.no_grad(): metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0]) timer.stop() train_l = metric[0] / metric[2] train_acc = metric[1] / metric[2] if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1: animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None)) test_acc = evaluate_accuracy_gpu(net, test_iter) animator.add(epoch + 1, (None, None, test_acc))
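A brief shape trace (added arithmetic, not from the source) for why the first Linear layer of the LeNet variant above takes 16 * 5 * 5 inputs: 28 -> conv(5x5, pad 2) -> 28 -> pool(2, stride 2) -> 14 -> conv(5x5) -> 10 -> pool(2, stride 2) -> 5, over 16 channels.
h = 28
h = h - 5 + 2 * 2 + 1  # 28 after the padded 5x5 convolution
h = h // 2             # 14 after 2x2 pooling with stride 2
h = h - 5 + 1          # 10 after the second 5x5 convolution
h = h // 2             # 5 after the second pooling
print(16 * h * h)      # 400 = 16 * 5 * 5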
79
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() net = nn.Sequential() net.add( nn.Conv2D(96, kernel_size=11, strides=4, activation='relu'), nn.MaxPool2D(pool_size=3, strides=2), nn.Conv2D(256, kernel_size=5, padding=2, activation='relu'), nn.MaxPool2D(pool_size=3, strides=2), nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'), nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'), nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'), nn.MaxPool2D(pool_size=3, strides=2), nn.Dense(4096, activation='relu'), nn.Dropout(0.5), nn.Dense(4096, activation='relu'), nn.Dropout(0.5), nn.Dense(10)) X = np.random.uniform(size=(1, 1, 224, 224)) net.initialize() for layer in net: X = layer(X) print(layer.name, 'output shape: ', X.shape)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn net = nn.Sequential( nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2), nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2), nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(), nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, 10)) X = paddle.randn(shape=(1, 1, 224, 224)) for layer in net: X=layer(X) print(layer.__class__.__name__,'output shape: ',X.shape)
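Similar arithmetic (added as an illustration) for the 6400-unit flatten in the AlexNet-style network above, starting from a 224 x 224 input.
def down(n, k, p, s):
    return (n - k + 2 * p + s) // s
h = down(224, 11, 1, 4)  # 54 after the stride-4 11x11 convolution
h = down(h, 3, 0, 2)     # 26 after max pooling
h = down(h, 5, 2, 1)     # 26 after the padded 5x5 convolution
h = down(h, 3, 0, 2)     # 12 after max pooling
h = down(h, 3, 1, 1)     # 12 (the three 3x3 convolutions keep the size)
h = down(h, 3, 0, 2)     # 5 after the final max pooling
print(256 * h * h)       # 6400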
80
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def vgg_block(num_convs, num_channels): blk = nn.Sequential() for _ in range(num_convs): blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1, activation='relu')) blk.add(nn.MaxPool2D(pool_size=2, strides=2)) return blk def vgg(conv_arch): net = nn.Sequential() for (num_convs, num_channels) in conv_arch: net.add(vgg_block(num_convs, num_channels)) net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5), nn.Dense(4096, activation='relu'), nn.Dropout(0.5), nn.Dense(10)) return net net = vgg(conv_arch) net.initialize() X = np.random.uniform(size=(1, 1, 224, 224)) for blk in net: X = blk(X) print(blk.name, 'output shape: ', X.shape)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def vgg_block(num_convs, in_channels, out_channels): layers = [] for _ in range(num_convs): layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1)) layers.append(nn.ReLU()) in_channels = out_channels layers.append(nn.MaxPool2D(kernel_size=2, stride=2)) return nn.Sequential(*layers) def vgg(conv_arch): conv_blks = [] in_channels = 1 for (num_convs, out_channels) in conv_arch: conv_blks.append(vgg_block(num_convs, in_channels, out_channels)) in_channels = out_channels return nn.Sequential(*conv_blks, nn.Flatten(), nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5), nn.Linear(4096, 10)) net = vgg(conv_arch) X = paddle.randn(shape=(1, 1, 224, 224)) for blk in net: X = blk(X) print(blk.__class__.__name__,'output shape: ',X.shape)
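Both VGG blocks above call vgg(conv_arch) without defining conv_arch in this chunk; in the d2l text it is the VGG-11 configuration of (num_convs, out_channels) pairs, reproduced below as an assumption that would need to run before either block.
conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))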
81
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def nin_block(num_channels, kernel_size, strides, padding): blk = nn.Sequential() blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'), nn.Conv2D(num_channels, kernel_size=1, activation='relu'), nn.Conv2D(num_channels, kernel_size=1, activation='relu')) return blk net = nn.Sequential() net.add(nin_block(96, kernel_size=11, strides=4, padding=0), nn.MaxPool2D(pool_size=3, strides=2), nin_block(256, kernel_size=5, strides=1, padding=2), nn.MaxPool2D(pool_size=3, strides=2), nin_block(384, kernel_size=3, strides=1, padding=1), nn.MaxPool2D(pool_size=3, strides=2), nn.Dropout(0.5), nin_block(10, kernel_size=3, strides=1, padding=1), nn.GlobalAvgPool2D(), nn.Flatten()) X = np.random.uniform(size=(1, 1, 224, 224)) net.initialize() for layer in net: X = layer(X) print(layer.name, 'output shape: ', X.shape)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def nin_block(in_channels, out_channels, kernel_size, strides, padding): return nn.Sequential( nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding), nn.ReLU(), nn.Conv2D(out_channels, out_channels, kernel_size=1), nn.ReLU(), nn.Conv2D(out_channels, out_channels, kernel_size=1), nn.ReLU()) net = nn.Sequential( nin_block(1, 96, kernel_size=11, strides=4, padding=0), nn.MaxPool2D(3, stride=2), nin_block(96, 256, kernel_size=5, strides=1, padding=2), nn.MaxPool2D(3, stride=2), nin_block(256, 384, kernel_size=3, strides=1, padding=1), nn.MaxPool2D(3, stride=2), nn.Dropout(0.5), nin_block(384, 10, kernel_size=3, strides=1, padding=1), nn.AdaptiveAvgPool2D((1, 1)), nn.Flatten()) X = paddle.rand(shape=(1, 1, 224, 224)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
82
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() class Inception(nn.Block): def __init__(self, c1, c2, c3, c4, **kwargs): super(Inception, self).__init__(**kwargs) self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu') self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu') self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1, activation='relu') self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu') self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2, activation='relu') self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1) self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu') def forward(self, x): p1 = self.p1_1(x) p2 = self.p2_2(self.p2_1(x)) p3 = self.p3_2(self.p3_1(x)) p4 = self.p4_2(self.p4_1(x)) return np.concatenate((p1, p2, p3, p4), axis=1) b1 = nn.Sequential() b1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) b2 = nn.Sequential() b2.add(nn.Conv2D(64, kernel_size=1, activation='relu'), nn.Conv2D(192, kernel_size=3, padding=1, activation='relu'), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) b3 = nn.Sequential() b3.add(Inception(64, (96, 128), (16, 32), 32), Inception(128, (128, 192), (32, 96), 64), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) b4 = nn.Sequential() b4.add(Inception(192, (96, 208), (16, 48), 64), Inception(160, (112, 224), (24, 64), 64), Inception(128, (128, 256), (24, 64), 64), Inception(112, (144, 288), (32, 64), 64), Inception(256, (160, 320), (32, 128), 128), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) b5 = nn.Sequential() b5.add(Inception(256, (160, 320), (32, 128), 128), Inception(384, (192, 384), (48, 128), 128), nn.GlobalAvgPool2D()) net = nn.Sequential() net.add(b1, b2, b3, b4, b5, nn.Dense(10)) X = np.random.uniform(size=(1, 1, 96, 96)) net.initialize() for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn import paddle.nn.functional as F class Inception(nn.Layer): def __init__(self, in_channels, c1, c2, c3, c4, **kwargs): super(Inception, self).__init__(**kwargs) self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1) self.p2_1 = nn.Conv2D(in_channels, c2[0], kernel_size=1) self.p2_2 = nn.Conv2D(c2[0], c2[1], kernel_size=3, padding=1) self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1) self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2) self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1) self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1) def forward(self, x): p1 = F.relu(self.p1_1(x)) p2 = F.relu(self.p2_2(F.relu(self.p2_1(x)))) p3 = F.relu(self.p3_2(F.relu(self.p3_1(x)))) p4 = F.relu(self.p4_2(self.p4_1(x))) return paddle.concat(x=[p1, p2, p3, p4], axis=1) b1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2,padding=1)) b2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1), nn.ReLU(), nn.Conv2D(64, 192, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32), Inception(256, 128, (128, 192), (32, 96), 64), nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64), Inception(512, 160, (112, 224), (24, 64), 64), Inception(512, 128, (128, 256), (24, 64), 64), Inception(512, 112, (144, 288), (32, 64), 64), Inception(528, 256, (160, 320), (32, 128), 128), nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128), Inception(832, 384, (192, 384), (48, 128), 128), nn.AdaptiveAvgPool2D((1, 1)), nn.Flatten()) net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10)) X = paddle.rand(shape=(1, 1, 96, 96)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
83
from mxnet import autograd, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum): if not autograd.is_training(): X_hat = (X - moving_mean) / np.sqrt(moving_var + eps) else: assert len(X.shape) in (2, 4) if len(X.shape) == 2: mean = X.mean(axis=0) var = ((X - mean) ** 2).mean(axis=0) else: mean = X.mean(axis=(0, 2, 3), keepdims=True) var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True) X_hat = (X - mean) / np.sqrt(var + eps) moving_mean = momentum * moving_mean + (1.0 - momentum) * mean moving_var = momentum * moving_var + (1.0 - momentum) * var Y = gamma * X_hat + beta return Y, moving_mean, moving_var class BatchNorm(nn.Block): def __init__(self, num_features, num_dims, **kwargs): super().__init__(**kwargs) if num_dims == 2: shape = (1, num_features) else: shape = (1, num_features, 1, 1) self.gamma = self.params.get('gamma', shape=shape, init=init.One()) self.beta = self.params.get('beta', shape=shape, init=init.Zero()) self.moving_mean = np.zeros(shape) self.moving_var = np.ones(shape) def forward(self, X): if self.moving_mean.ctx != X.ctx: self.moving_mean = self.moving_mean.copyto(X.ctx) self.moving_var = self.moving_var.copyto(X.ctx) Y, self.moving_mean, self.moving_var = batch_norm( X, self.gamma.data(), self.beta.data(), self.moving_mean, self.moving_var, eps=1e-12, momentum=0.9) return Y net = nn.Sequential() net.add(nn.Conv2D(6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Activation('sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Conv2D(16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Activation('sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Dense(120), BatchNorm(120, num_dims=2), nn.Activation('sigmoid'), nn.Dense(84), BatchNorm(84, num_dims=2), nn.Activation('sigmoid'), nn.Dense(10)) net[1].gamma.data().reshape(-1,), net[1].beta.data().reshape(-1,) net = nn.Sequential() net.add(nn.Conv2D(6, kernel_size=5), nn.BatchNorm(), nn.Activation('sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Conv2D(16, kernel_size=5), nn.BatchNorm(), nn.Activation('sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Dense(120), nn.BatchNorm(), nn.Activation('sigmoid'), nn.Dense(84), nn.BatchNorm(), nn.Activation('sigmoid'), nn.Dense(10))
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True): if not is_training: X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5 else: assert len(X.shape) in (2, 4) if len(X.shape) == 2: mean = paddle.mean(X) var = paddle.mean(((X - mean) ** 2)) else: mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True) var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True) X_hat = (X - mean) / (var + eps) ** 0.5 moving_mean = momentum * moving_mean + (1.0 - momentum) * mean moving_var = momentum * moving_var + (1.0 - momentum) * var Y = gamma * X_hat + beta return Y, moving_mean, moving_var class BatchNorm(nn.Layer): def __init__(self, num_features, num_dims=4): super(BatchNorm, self).__init__() if num_dims == 2: shape = (1, num_features) else: shape = (1, num_features, 1, 1) self.gamma = self.create_parameter( attr=None, shape=shape, dtype='float32', is_bias=False, default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32'))) self.beta = self.create_parameter( attr=None, shape=shape, dtype='float32', is_bias=False, default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32'))) self.moving_mean = paddle.zeros(shape=shape, dtype='float32') self.moving_var = paddle.zeros(shape=shape, dtype='float32') def forward(self, X): Y, self.moving_mean, self.moving_var = batch_norm( X, self.gamma, self.beta, self.moving_mean, self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training) return Y net = nn.Sequential( nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(), nn.MaxPool2D(kernel_size=2, stride=2), nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(), nn.MaxPool2D(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(), nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(), nn.Linear(84, 10)) param = net.parameters() print('gamma:', param[2].numpy().reshape(-1)) print('beta:', param[3].numpy().reshape(-1)) net = nn.Sequential( nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(), nn.MaxPool2D(kernel_size=2, stride=2), nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(), nn.MaxPool2D(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(), nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(), nn.Linear(84, 10))
84
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() class Residual(nn.Block): def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs): super().__init__(**kwargs) self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides) self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1) if use_1x1conv: self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides) else: self.conv3 = None self.bn1 = nn.BatchNorm() self.bn2 = nn.BatchNorm() def forward(self, X): Y = npx.relu(self.bn1(self.conv1(X))) Y = self.bn2(self.conv2(Y)) if self.conv3: X = self.conv3(X) return npx.relu(Y + X) blk = Residual(3) blk.initialize() X = np.random.uniform(size=(4, 3, 6, 6)) blk(X).shape blk = Residual(6, use_1x1conv=True, strides=2) blk.initialize() blk(X).shape net = nn.Sequential() net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3), nn.BatchNorm(), nn.Activation('relu'), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) def resnet_block(num_channels, num_residuals, first_block=False): blk = nn.Sequential() for i in range(num_residuals): if i == 0 and not first_block: blk.add(Residual(num_channels, use_1x1conv=True, strides=2)) else: blk.add(Residual(num_channels)) return blk net.add(resnet_block(64, 2, first_block=True), resnet_block(128, 2), resnet_block(256, 2), resnet_block(512, 2)) net.add(nn.GlobalAvgPool2D(), nn.Dense(10)) X = np.random.uniform(size=(1, 1, 224, 224)) net.initialize() for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn from paddle.nn import functional as F class Residual(nn.Layer): def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1): super(Residual, self).__init__() self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides) self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1) if use_1x1conv: self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides) else: self.conv3 = None self.bn1 = nn.BatchNorm2D(num_channels) self.bn2 = nn.BatchNorm2D(num_channels) self.relu = nn.ReLU() def forward(self, X): Y = F.relu(self.bn1(self.conv1(X))) Y = self.bn2(self.conv2(Y)) if self.conv3: X = self.conv3(X) Y += X return F.relu(Y) blk = Residual(3, 3) X = paddle.rand([4, 3, 6, 6]) Y = blk(X) Y.shape blk = Residual(3, 6, use_1x1conv=True, strides=2) blk(X).shape b1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3), nn.BatchNorm2D(64), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) def resnet_block(input_channels, num_channels, num_residuals, first_block=False): blk = [] for i in range(num_residuals): if i == 0 and not first_block: blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2)) else: blk.append(Residual(num_channels, num_channels)) return blk b2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True)) b3 = nn.Sequential(*resnet_block(64, 128, 2)) b4 = nn.Sequential(*resnet_block(128, 256, 2)) b5 = nn.Sequential(*resnet_block(256, 512, 2)) net = nn.Sequential(b1, b2, b3, b4, b5, nn.AdaptiveAvgPool2D((1, 1)), nn.Flatten(), nn.Linear(512, 10)) X = paddle.rand(shape=(1, 1, 224, 224)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
85
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def conv_block(num_channels): blk = nn.Sequential() blk.add(nn.BatchNorm(), nn.Activation('relu'), nn.Conv2D(num_channels, kernel_size=3, padding=1)) return blk class DenseBlock(nn.Block): def __init__(self, num_convs, num_channels, **kwargs): super().__init__(**kwargs) self.net = nn.Sequential() for _ in range(num_convs): self.net.add(conv_block(num_channels)) def forward(self, X): for blk in self.net: Y = blk(X) X = np.concatenate((X, Y), axis=1) return X blk = DenseBlock(2, 10) blk.initialize() X = np.random.uniform(size=(4, 3, 8, 8)) Y = blk(X) Y.shape def transition_block(num_channels): blk = nn.Sequential() blk.add(nn.BatchNorm(), nn.Activation('relu'), nn.Conv2D(num_channels, kernel_size=1), nn.AvgPool2D(pool_size=2, strides=2)) return blk blk = transition_block(10) blk.initialize() blk(Y).shape net = nn.Sequential() net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3), nn.BatchNorm(), nn.Activation('relu'), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) num_channels, growth_rate = 64, 32 num_convs_in_dense_blocks = [4, 4, 4, 4] for i, num_convs in enumerate(num_convs_in_dense_blocks): net.add(DenseBlock(num_convs, growth_rate)) num_channels += num_convs * growth_rate if i != len(num_convs_in_dense_blocks) - 1: num_channels //= 2 net.add(transition_block(num_channels)) net.add(nn.BatchNorm(), nn.Activation('relu'), nn.GlobalAvgPool2D(), nn.Dense(10))
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def conv_block(input_channels, num_channels): return nn.Sequential( nn.BatchNorm2D(input_channels), nn.ReLU(), nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1)) class DenseBlock(nn.Layer): def __init__(self, num_convs, input_channels, num_channels): super(DenseBlock, self).__init__() layer = [] for i in range(num_convs): layer.append(conv_block(num_channels * i + input_channels, num_channels)) self.net = nn.Sequential(*layer) def forward(self, X): for blk in self.net: Y = blk(X) X = paddle.concat(x=[X, Y], axis=1) return X blk = DenseBlock(2, 3, 10) X = paddle.randn([4, 3, 8, 8]) Y = blk(X) Y.shape def transition_block(input_channels, num_channels): return nn.Sequential( nn.BatchNorm2D(input_channels), nn.ReLU(), nn.Conv2D(input_channels, num_channels, kernel_size=1), nn.AvgPool2D(kernel_size=2, stride=2)) blk = transition_block(23, 10) blk(Y).shape b1 = nn.Sequential( nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3), nn.BatchNorm2D(64), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) num_channels, growth_rate = 64, 32 num_convs_in_dense_blocks = [4, 4, 4, 4] blks = [] for i, num_convs in enumerate(num_convs_in_dense_blocks): blks.append(DenseBlock(num_convs, num_channels, growth_rate)) num_channels += num_convs * growth_rate if i != len(num_convs_in_dense_blocks) - 1: blks.append(transition_block(num_channels, num_channels // 2)) num_channels = num_channels // 2 net = nn.Sequential( b1, *blks, nn.BatchNorm2D(num_channels), nn.ReLU(), nn.AdaptiveMaxPool2D((1, 1)), nn.Flatten(), nn.Linear(num_channels, 10))
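Channel bookkeeping for the dense block example above (an added note): its input has 3 channels and each of the 2 conv blocks appends growth_rate = 10 channels, which is why the following transition block takes 23 input channels.
print(3 + 2 * 10)  # 23 channels entering transition_block(23, 10)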
86
%matplotlib inline from mxnet import autograd, gluon, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() T = 1000 time = np.arange(1, T + 1, dtype=np.float32) x = np.sin(0.01 * time) + np.random.normal(0, 0.2, (T,)) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) tau = 4 features = np.zeros((T - tau, tau)) for i in range(tau): features[:, i] = x[i: T - tau + i] labels = x[tau:].reshape((-1, 1)) batch_size, n_train = 16, 600 train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True) def get_net(): net = nn.Sequential() net.add(nn.Dense(10, activation='relu'), nn.Dense(1)) net.initialize(init.Xavier()) return net loss = gluon.loss.L2Loss() def train(net, train_iter, loss, epochs, lr): trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr}) for epoch in range(epochs): for X, y in train_iter: with autograd.record(): l = loss(net(X), y) l.backward() trainer.step(batch_size) net = get_net() train(net, train_iter, loss, 5, 0.01) onestep_preds = net(features) d2l.plot([time, time[tau:]], [x.asnumpy(), onestep_preds.asnumpy()], 'time', 'x', legend=['data', '1-step preds'], xlim=[1, 1000], figsize=(6, 3)) multistep_preds = np.zeros(T) multistep_preds[: n_train + tau] = x[: n_train + tau] for i in range(n_train + tau, T): multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1))) d2l.plot([time, time[tau:], time[n_train + tau:]], [x.asnumpy(), onestep_preds.asnumpy(), multistep_preds[n_train + tau:].asnumpy()], 'time', 'x', legend=['data', '1-step preds', 'multistep preds'], xlim=[1, 1000], figsize=(6, 3)) max_steps = 64 features = np.zeros((T - tau - max_steps + 1, tau + max_steps)) for i in range(tau): features[:, i] = x[i: i + T - tau - max_steps + 1] for i in range(tau, tau + max_steps): features[:, i] = net(features[:, i - tau:i]).reshape(-1) steps = (1, 4, 16, 64) d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps], [features[:, tau + i - 1].asnumpy() for i in steps], 'time', 'x', legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000], figsize=(6, 3))
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn T = 1000 time = paddle.arange(1, T + 1, dtype=paddle.float32) x = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,)) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) tau = 4 features = paddle.zeros((T - tau, tau)) for i in range(tau): features[:, i] = x[i: T - tau + i] labels = x[tau:].reshape((-1, 1)) batch_size, n_train = 16, 600 train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True) def init_weights(m): if type(m) == nn.Linear: nn.initializer.XavierUniform(m.weight) def get_net(): net = nn.Sequential(nn.Linear(4, 10), nn.ReLU(), nn.Linear(10, 1)) net.apply(init_weights) return net loss = nn.MSELoss(reduction='none') def train(net, train_iter, loss, epochs, lr): trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters()) for epoch in range(epochs): for i,(X, y) in enumerate (train_iter()): trainer.clear_grad() l = loss(net(X), y) l.sum().backward() trainer.step() net = get_net() train(net, train_iter, loss, 5, 0.01) onestep_preds = net(features) d2l.plot([time, time[tau:]], [x.detach().numpy(), onestep_preds.detach().numpy()], 'time', 'x', legend=['data', '1-step preds'], xlim=[1, 1000], figsize=(6, 3)) multistep_preds = paddle.zeros([T]) multistep_preds[: n_train + tau] = x[: n_train + tau] for i in range(n_train + tau, T): multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1))) d2l.plot([time, time[tau:], time[n_train + tau:]], [x.detach().numpy(), onestep_preds.detach().numpy(), multistep_preds[n_train + tau:].detach().numpy()], 'time', 'x', legend=['data', '1-step preds', 'multistep preds'], xlim=[1, 1000], figsize=(6, 3)) max_steps = 64 features = paddle.zeros((T - tau - max_steps + 1, tau + max_steps)) for i in range(tau): features[:, i] = x[i: i + T - tau - max_steps + 1] for i in range(tau, tau + max_steps): features[:, i] = net(features[:, i - tau:i]).reshape([-1]) steps = (1, 4, 16, 64) d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps], [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x', legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000], figsize=(6, 3))
87
import collections import re from d2l import mxnet as d2l
import collections import re from d2l import paddle as d2l
88
import random from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): corpus = corpus[random.randint(0, num_steps - 1):] num_subseqs = (len(corpus) - 1) // num_steps initial_indices = list(range(0, num_subseqs * num_steps, num_steps)) random.shuffle(initial_indices) def data(pos): return corpus[pos: pos + num_steps] num_batches = num_subseqs // batch_size for i in range(0, batch_size * num_batches, batch_size): initial_indices_per_batch = initial_indices[i: i + batch_size] X = [data(j) for j in initial_indices_per_batch] Y = [data(j + 1) for j in initial_indices_per_batch] yield np.array(X), np.array(Y) def seq_data_iter_sequential(corpus, batch_size, num_steps): offset = random.randint(0, num_steps) num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size Xs = np.array(corpus[offset: offset + num_tokens]) Ys = np.array(corpus[offset + 1: offset + 1 + num_tokens]) Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1) num_batches = Xs.shape[1] // num_steps for i in range(0, num_steps * num_batches, num_steps): X = Xs[:, i: i + num_steps] Y = Ys[:, i: i + num_steps] yield X, Y
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import random import paddle tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): corpus = corpus[random.randint(0, num_steps - 1):] num_subseqs = (len(corpus) - 1) // num_steps initial_indices = list(range(0, num_subseqs * num_steps, num_steps)) random.shuffle(initial_indices) def data(pos): return corpus[pos: pos + num_steps] num_batches = num_subseqs // batch_size for i in range(0, batch_size * num_batches, batch_size): initial_indices_per_batch = initial_indices[i: i + batch_size] X = [data(j) for j in initial_indices_per_batch] Y = [data(j + 1) for j in initial_indices_per_batch] yield paddle.to_tensor(X), paddle.to_tensor(Y) def seq_data_iter_sequential(corpus, batch_size, num_steps): offset = random.randint(0, num_steps) num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size Xs = paddle.to_tensor(corpus[offset: offset + num_tokens]) Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens]) Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1)) num_batches = Xs.shape[1] // num_steps for i in range(0, num_steps * num_batches, num_steps): X = Xs[:, i: i + num_steps] Y = Ys[:, i: i + num_steps] yield X, Y
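A usage sketch for the two iterators above (added illustration on a toy corpus 0..34, mirroring how d2l exercises them): each minibatch holds 2 subsequences of 5 time steps, with Y shifted one step ahead of X.
my_seq = list(range(35))
for X, Y in seq_data_iter_random(my_seq, batch_size=2, num_steps=5):
    print('X:', X, '\nY:', Y)
    break
for X, Y in seq_data_iter_sequential(my_seq, batch_size=2, num_steps=5):
    print('X:', X, '\nY:', Y)
    break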
89
from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() X, W_xh = np.random.normal(0, 1, (3, 1)), np.random.normal(0, 1, (1, 4)) H, W_hh = np.random.normal(0, 1, (3, 4)), np.random.normal(0, 1, (4, 4)) np.dot(X, W_xh) + np.dot(H, W_hh) np.dot(np.concatenate((X, H), 1), np.concatenate((W_xh, W_hh), 0))
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle X, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4)) H, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4)) paddle.matmul(X, W_xh) + paddle.matmul(H, W_hh) paddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))
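A numerical confirmation of the equivalence shown above (added illustration reusing X, W_xh, H, W_hh from that block): concatenating inputs and hidden states gives the same result as summing the two matrix products, up to floating-point error.
A = paddle.matmul(X, W_xh) + paddle.matmul(H, W_hh)
B = paddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))
print(float(paddle.abs(A - B).max()))  # ~0.0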
90
%matplotlib inline import math from mxnet import autograd, gluon, np, npx from d2l import mxnet as d2l npx.set_np() batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) npx.one_hot(np.array([0, 2]), len(vocab)) X = np.arange(10).reshape((2, 5)) npx.one_hot(X.T, 28).shape def get_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return np.random.normal(scale=0.01, size=shape, ctx=device) W_xh = normal((num_inputs, num_hiddens)) W_hh = normal((num_hiddens, num_hiddens)) b_h = np.zeros(num_hiddens, ctx=device) W_hq = normal((num_hiddens, num_outputs)) b_q = np.zeros(num_outputs, ctx=device) params = [W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.attach_grad() return params def init_rnn_state(batch_size, num_hiddens, device): return (np.zeros((batch_size, num_hiddens), ctx=device), ) def rnn(inputs, state, params): W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: H = np.tanh(np.dot(X, W_xh) + np.dot(H, W_hh) + b_h) Y = np.dot(H, W_hq) + b_q outputs.append(Y) return np.concatenate(outputs, axis=0), (H,) class RNNModelScratch: def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn): self.vocab_size, self.num_hiddens = vocab_size, num_hiddens self.params = get_params(vocab_size, num_hiddens, device) self.init_state, self.forward_fn = init_state, forward_fn def __call__(self, X, state): X = npx.one_hot(X.T, self.vocab_size) return self.forward_fn(X, state, self.params) def begin_state(self, batch_size, ctx): return self.init_state(batch_size, self.num_hiddens, ctx) num_hiddens = 512 net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn) state = net.begin_state(X.shape[0], d2l.try_gpu()) Y, new_state = net(X.as_in_context(d2l.try_gpu()), state) Y.shape, len(new_state), new_state[0].shape def predict_ch8(prefix, num_preds, net, vocab, device): state = net.begin_state(batch_size=1, ctx=device) outputs = [vocab[prefix[0]]] get_input = lambda: np.array([outputs[-1]], ctx=device).reshape((1, 1)) for y in prefix[1:]: _, state = net(get_input(), state) outputs.append(vocab[y]) for _ in range(num_preds): y, state = net(get_input(), state) outputs.append(int(y.argmax(axis=1).reshape(1))) return ''.join([vocab.idx_to_token[i] for i in outputs]) def grad_clipping(net, theta): if isinstance(net, gluon.Block): params = [p.data() for p in net.collect_params().values()] else: params = net.params norm = math.sqrt(sum((p.grad ** 2).sum() for p in params)) if norm > theta: for param in params: param.grad[:] *= theta / norm def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter): state, timer = None, d2l.Timer() metric = d2l.Accumulator(2) for X, Y in train_iter: if state is None or use_random_iter: state = net.begin_state(batch_size=X.shape[0], ctx=device) else: for s in state: s.detach() y = Y.T.reshape(-1) X, y = X.as_in_ctx(device), y.as_in_ctx(device) with autograd.record(): y_hat, state = net(X, state) l = loss(y_hat, y).mean() l.backward() grad_clipping(net, 1) updater(batch_size=1) metric.add(l * d2l.size(y), d2l.size(y)) return math.exp(metric[0] / metric[1]), metric[1] / timer.stop() def train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False): loss = gluon.loss.SoftmaxCrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs]) if isinstance(net, gluon.Block): net.initialize(ctx=device, 
force_reinit=True, init=init.Normal(0.01)) trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr}) updater = lambda batch_size: trainer.step(batch_size) else: updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size) predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device) for epoch in range(num_epochs): ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter) if (epoch + 1) % 10 == 0: animator.add(epoch + 1, [ppl]) net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn) train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import math import paddle from paddle import nn from paddle.nn import functional as F batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) F.one_hot(paddle.to_tensor([0, 2]), len(vocab)) X = paddle.arange(10).reshape((2, 5)) F.one_hot(X.T, 28).shape def get_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return paddle.randn(shape=shape)* 0.01 W_xh = normal([num_inputs, num_hiddens]) W_hh = normal([num_hiddens, num_hiddens]) b_h = paddle.zeros(shape=[num_hiddens]) W_hq = normal([num_hiddens, num_outputs]) b_q = paddle.zeros(shape=[num_outputs]) params = [W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.stop_gradient=False return params def init_rnn_state(batch_size, num_hiddens): return (paddle.zeros(shape=[batch_size, num_hiddens]), ) def rnn(inputs, state, params): W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: H = paddle.tanh(paddle.mm(X, W_xh) + paddle.mm(H, W_hh) + b_h) Y = paddle.mm(H, W_hq) + b_q outputs.append(Y) return paddle.concat(x=outputs, axis=0), (H,) class RNNModelScratch: def __init__(self, vocab_size, num_hiddens, get_params, init_state, forward_fn): self.vocab_size, self.num_hiddens = vocab_size, num_hiddens self.params = get_params(vocab_size, num_hiddens) self.init_state, self.forward_fn = init_state, forward_fn def __call__(self, X, state): X = F.one_hot(X.T, self.vocab_size) return self.forward_fn(X, state, self.params) def begin_state(self, batch_size): return self.init_state(batch_size, self.num_hiddens) num_hiddens = 512 net = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn) state = net.begin_state(X.shape[0]) Y, new_state = net(X, state) Y.shape, len(new_state), new_state[0].shape def predict_ch8(prefix, num_preds, net, vocab, device): state = net.begin_state(batch_size=1) outputs = [vocab[prefix[0]]] get_input = lambda: paddle.to_tensor(outputs[-1], place=device).reshape((1, 1)) for y in prefix[1:]: _, state = net(get_input(), state) outputs.append(vocab[y]) for _ in range(num_preds): y, state = net(get_input(), state) outputs.append(int(paddle.reshape(paddle.argmax(y,axis=1),shape=[1]))) return ''.join([vocab.idx_to_token[i] for i in outputs]) def grad_clipping(net, theta): if isinstance(net, nn.Layer): params = [p for p in net.parameters() if not p.stop_gradient] else: params = net.params norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params)) if norm > theta: with paddle.no_grad(): for param in params: param.grad.set_value(param.grad * theta / norm) def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter): state, timer = None, d2l.Timer() metric = d2l.Accumulator(2) for X, Y in train_iter: if state is None or use_random_iter: state = net.begin_state(batch_size=X.shape[0]) else: if isinstance(net, nn.Layer) and not isinstance(state, tuple): state.stop_gradient=True else: for s in state: s.stop_gradient=True y = paddle.reshape(Y.T,shape=[-1]) X = paddle.to_tensor(X, place=device) y = paddle.to_tensor(y, place=device) y_hat, state = net(X, state) l = loss(y_hat, y).mean() if isinstance(updater, paddle.optimizer.Optimizer): updater.clear_grad() l.backward() grad_clipping(net, 1) updater.step() else: l.backward() grad_clipping(net, 1) updater(batch_size=1) metric.add(l * y.numel(), y.numel()) return math.exp(metric[0] / metric[1]), metric[1] / timer.stop() def train_ch8(net, train_iter, 
vocab, lr, num_epochs, device, use_random_iter=False): loss = nn.CrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs]) if isinstance(net, nn.Layer): updater = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters()) else: updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size) predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device) for epoch in range(num_epochs): ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter) if (epoch + 1) % 10 == 0: animator.add(epoch + 1, [ppl]) net = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn) train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)
91
from mxnet import np, npx from mxnet.gluon import nn, rnn from d2l import mxnet as d2l npx.set_np() batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_layer = rnn.RNN(num_hiddens) rnn_layer.initialize() state = rnn_layer.begin_state(batch_size=batch_size) len(state), state[0].shape X = np.random.uniform(size=(num_steps, batch_size, len(vocab))) Y, state_new = rnn_layer(X, state) Y.shape, len(state_new), state_new[0].shape class RNNModel(nn.Block): def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.dense = nn.Dense(vocab_size) def forward(self, inputs, state): X = npx.one_hot(inputs.T, self.vocab_size) Y, state = self.rnn(X, state) output = self.dense(Y.reshape(-1, Y.shape[-1])) return output, state def begin_state(self, *args, **kwargs): return self.rnn.begin_state(*args, **kwargs) device = d2l.try_gpu() net = RNNModel(rnn_layer, len(vocab)) net.initialize(force_reinit=True, ctx=device) d2l.predict_ch8('time traveller', 10, net, vocab, device) num_epochs, lr = 500, 1 d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn from paddle.nn import functional as F batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True) state = paddle.zeros(shape=[1, batch_size, num_hiddens]) state.shape X = paddle.rand(shape=[num_steps, batch_size, len(vocab)]) Y, state_new = rnn_layer(X, state) Y.shape, state_new.shape class RNNModel(nn.Layer): def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.num_hiddens = self.rnn.hidden_size if self.rnn.num_directions==1: self.num_directions = 1 self.linear = nn.Linear(self.num_hiddens, self.vocab_size) else: self.num_directions = 2 self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size) def forward(self, inputs, state): X = F.one_hot(inputs.T, self.vocab_size) Y, state = self.rnn(X, state) output = self.linear(Y.reshape((-1, Y.shape[-1]))) return output, state def begin_state(self, batch_size=1): if not isinstance(self.rnn, nn.LSTM): return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]) else: return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]), paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens])) device = d2l.try_gpu() net = RNNModel(rnn_layer, vocab_size=len(vocab)) d2l.predict_ch8('time traveller', 10, net, vocab, device) num_epochs, lr = 500, 1.0 d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)
92
from mxnet import np, npx from mxnet.gluon import rnn from d2l import mxnet as d2l npx.set_np() batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return np.random.normal(scale=0.01, size=shape, ctx=device) def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device)) W_xz, W_hz, b_z = three() W_xr, W_hr, b_r = three() W_xh, W_hh, b_h = three() W_hq = normal((num_hiddens, num_outputs)) b_q = np.zeros(num_outputs, ctx=device) params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.attach_grad() return params def init_gru_state(batch_size, num_hiddens, device): return (np.zeros(shape=(batch_size, num_hiddens), ctx=device), ) def gru(inputs, state, params): W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: Z = npx.sigmoid(np.dot(X, W_xz) + np.dot(H, W_hz) + b_z) R = npx.sigmoid(np.dot(X, W_xr) + np.dot(H, W_hr) + b_r) H_tilda = np.tanh(np.dot(X, W_xh) + np.dot(R * H, W_hh) + b_h) H = Z * H + (1 - Z) * H_tilda Y = np.dot(H, W_hq) + b_q outputs.append(Y) return np.concatenate(outputs, axis=0), (H,) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1 model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params, init_gru_state, gru) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) gru_layer = rnn.GRU(num_hiddens) model = d2l.RNNModel(gru_layer, len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn.functional as F from paddle import nn batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return paddle.randn(shape=shape)*0.01 def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens])) W_xz, W_hz, b_z = three() W_xr, W_hr, b_r = three() W_xh, W_hh, b_h = three() W_hq = normal((num_hiddens, num_outputs)) b_q = paddle.zeros([num_outputs]) params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.stop_gradient = False return params def init_gru_state(batch_size, num_hiddens): return (paddle.zeros([batch_size, num_hiddens]), ) def gru(inputs, state, params): W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params H,*_ = state outputs = [] for X in inputs: Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z) R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r) H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h) H = Z * H + (1 - Z) * H_tilda Y = H @ W_hq + b_q outputs.append(Y) return paddle.concat(outputs, axis=0), (H,*_) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1.0 model = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params, init_gru_state, gru) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) num_inputs = vocab_size gru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True) model = d2l.RNNModel(gru_layer, len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
93
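Both gru() implementations above compute the same standard update: an update gate Z, a reset gate R, a candidate state, and a convex combination of the old and candidate states. A single step in plain NumPy, with toy shapes assumed for illustration, might look like this:

# One GRU step in plain NumPy, mirroring the scratch gru() above.
# Shapes and the random seed are assumptions for illustration only.
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

num_inputs, num_hiddens, batch = 28, 4, 2
rng = np.random.default_rng(0)
W_xz, W_xr, W_xh = [rng.normal(scale=0.01, size=(num_inputs, num_hiddens)) for _ in range(3)]
W_hz, W_hr, W_hh = [rng.normal(scale=0.01, size=(num_hiddens, num_hiddens)) for _ in range(3)]
b_z = b_r = b_h = np.zeros(num_hiddens)

X = rng.normal(size=(batch, num_inputs))
H = np.zeros((batch, num_hiddens))

Z = sigmoid(X @ W_xz + H @ W_hz + b_z)               # update gate
R = sigmoid(X @ W_xr + H @ W_hr + b_r)               # reset gate
H_tilda = np.tanh(X @ W_xh + (R * H) @ W_hh + b_h)   # candidate state
H = Z * H + (1 - Z) * H_tilda                        # mix old state and candidate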
from mxnet import np, npx from mxnet.gluon import rnn from d2l import mxnet as d2l npx.set_np() batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_lstm_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return np.random.normal(scale=0.01, size=shape, ctx=device) def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device)) W_xi, W_hi, b_i = three() W_xf, W_hf, b_f = three() W_xo, W_ho, b_o = three() W_xc, W_hc, b_c = three() W_hq = normal((num_hiddens, num_outputs)) b_q = np.zeros(num_outputs, ctx=device) params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] for param in params: param.attach_grad() return params def init_lstm_state(batch_size, num_hiddens, device): return (np.zeros((batch_size, num_hiddens), ctx=device), np.zeros((batch_size, num_hiddens), ctx=device)) def lstm(inputs, state, params): [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] = params (H, C) = state outputs = [] for X in inputs: I = npx.sigmoid(np.dot(X, W_xi) + np.dot(H, W_hi) + b_i) F = npx.sigmoid(np.dot(X, W_xf) + np.dot(H, W_hf) + b_f) O = npx.sigmoid(np.dot(X, W_xo) + np.dot(H, W_ho) + b_o) C_tilda = np.tanh(np.dot(X, W_xc) + np.dot(H, W_hc) + b_c) C = F * C + I * C_tilda H = O * np.tanh(C) Y = np.dot(H, W_hq) + b_q outputs.append(Y) return np.concatenate(outputs, axis=0), (H, C) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1 model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params, init_lstm_state, lstm) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) lstm_layer = rnn.LSTM(num_hiddens) model = d2l.RNNModel(lstm_layer, len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn.functional as Function from paddle import nn batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_lstm_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return paddle.randn(shape=shape)*0.01 def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens])) W_xi, W_hi, b_i = three() W_xf, W_hf, b_f = three() W_xo, W_ho, b_o = three() W_xc, W_hc, b_c = three() W_hq = normal((num_hiddens, num_outputs)) b_q = paddle.zeros([num_outputs]) params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] for param in params: param.stop_gradient = False return params def init_lstm_state(batch_size, num_hiddens): return (paddle.zeros([batch_size, num_hiddens]), paddle.zeros([batch_size, num_hiddens])) def lstm(inputs, state, params): [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] = params (H, C) = state outputs = [] for X in inputs: I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i) F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f) O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o) C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c) C = F * C + I * C_tilda H = O * paddle.tanh(C) Y = (H @ W_hq) + b_q outputs.append(Y) return paddle.concat(outputs, axis=0), (H, C) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1.0 model = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params, init_lstm_state, lstm) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) num_inputs = vocab_size lstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True) model = d2l.RNNModel(lstm_layer, len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
94
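The scratch lstm() adds a memory cell C guarded by input, forget, and output gates. A single step in plain NumPy (toy shapes assumed) can be sketched as:

# One LSTM step in plain NumPy, mirroring the scratch lstm() above.
# Shapes and the random seed are assumptions for illustration only.
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

num_inputs, num_hiddens, batch = 28, 4, 2
rng = np.random.default_rng(0)
W_xi, W_xf, W_xo, W_xc = [rng.normal(scale=0.01, size=(num_inputs, num_hiddens)) for _ in range(4)]
W_hi, W_hf, W_ho, W_hc = [rng.normal(scale=0.01, size=(num_hiddens, num_hiddens)) for _ in range(4)]
b_i = b_f = b_o = b_c = np.zeros(num_hiddens)

X = rng.normal(size=(batch, num_inputs))
H = np.zeros((batch, num_hiddens))
C = np.zeros((batch, num_hiddens))

I = sigmoid(X @ W_xi + H @ W_hi + b_i)        # input gate
F = sigmoid(X @ W_xf + H @ W_hf + b_f)        # forget gate
O = sigmoid(X @ W_xo + H @ W_ho + b_o)        # output gate
C_tilda = np.tanh(X @ W_xc + H @ W_hc + b_c)  # candidate memory cell
C = F * C + I * C_tilda                       # new memory cell
H = O * np.tanh(C)                            # new hidden state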
import os from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() def build_array_nmt(lines, vocab, num_steps): lines = [vocab[l] for l in lines] lines = [l + [vocab['<eos>']] for l in lines] array = np.array([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines]) valid_len = (array != vocab['<pad>']).astype(np.int32).sum(1) return array, valid_len train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8) for X, X_valid_len, Y, Y_valid_len in train_iter: print('X:', X.astype(np.int32)) print('Valid length of X:', X_valid_len) print('Y:', Y.astype(np.int32)) print('Valid length of Y:', Y_valid_len) break
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import os import paddle def build_array_nmt(lines, vocab, num_steps): lines = [vocab[l] for l in lines] lines = [l + [vocab['<eos>']] for l in lines] array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines]) valid_len = (array != vocab['<pad>']).astype(paddle.int32).sum(1) return array, valid_len train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8) for X, X_valid_len, Y, Y_valid_len in train_iter: print('X:', X.astype(paddle.int32)) print('Valid length of X:', X_valid_len) print('Y:', Y.astype(paddle.int32)) print('Valid length of Y:', Y_valid_len) break
95
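build_array_nmt in both snippets calls a truncate_pad helper that is defined elsewhere in d2l and not shown in this record. A minimal version consistent with how it is used (truncate to num_steps or pad with the padding token) might look like this; the token ids in the usage lines are hypothetical:

# Minimal sketch of the truncate_pad helper assumed by build_array_nmt above.
def truncate_pad(line, num_steps, padding_token):
    if len(line) > num_steps:
        return line[:num_steps]                                # truncate long sequences
    return line + [padding_token] * (num_steps - len(line))   # pad short ones

# Hypothetical token ids, with 0 standing in for <pad>.
print(truncate_pad([7, 8, 9], 5, 0))           # [7, 8, 9, 0, 0]
print(truncate_pad([1, 2, 3, 4, 5, 6], 5, 0))  # [1, 2, 3, 4, 5]
# The valid length computed afterwards is just the count of non-<pad> entries per row.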
x = torch.arange(12) x.numel() torch.zeros((2, 3, 4)) torch.ones((2, 3, 4)) torch.randn(3, 4) torch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = torch.tensor([1.0, 2, 4, 8]) y = torch.tensor([2, 2, 2, 2]) x + y, x - y, x * y, x / y, x ** y torch.exp(x) X = torch.arange(12, dtype=torch.float32).reshape((3,4)) Y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) torch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1) a = torch.arange(3).reshape((3, 1)) b = torch.arange(2).reshape((1, 2)) Z = torch.zeros_like(Y) Z[:] = X + Y A = X.numpy() B = torch.tensor(A) a = torch.tensor([3.5]) print(a, a.item(), float(a), int(a))
x = np.arange(12) x.size np.zeros((2, 3, 4)) np.ones((2, 3, 4)) np.random.normal(0, 1, size=(3, 4)) np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = np.array([1, 2, 4, 8]) y = np.array([2, 2, 2, 2]) x + y, x - y, x * y, x / y, x ** y np.exp(x) X = np.arange(12).reshape(3, 4) Y = np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) np.concatenate([X, Y], axis=0), np.concatenate([X, Y], axis=1) a = np.arange(3).reshape(3, 1) b = np.arange(2).reshape(1, 2) Z = np.zeros_like(Y) Z[:] = X + Y A = X.asnumpy() B = np.array(A) a = np.array([3.5]) print(a, a.item(), float(a), int(a))
96
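Both basics snippets construct a (3, 1) array a and a (1, 2) array b but do not show the broadcast result; for reference, this is what a + b expands to (NumPy shown here, the tensor libraries above behave the same way):

# Broadcasting sketch: (3, 1) + (1, 2) expands to (3, 2).
import numpy as np

a = np.arange(3).reshape(3, 1)
b = np.arange(2).reshape(1, 2)
print(a + b)
# [[0 1]
#  [1 2]
#  [2 3]]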
import torch X, y = torch.tensor(inputs.values), torch.tensor(outputs.values)
from mxnet import np X, y = np.array(inputs.values), np.array(outputs.values)
97
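The inputs and outputs frames referenced above come from preprocessing that is not part of this record; a hypothetical stand-in DataFrame illustrates the same .values-to-tensor conversion (the column names and numbers here are made up for the sketch):

# Hypothetical example of converting pandas data to tensors, as in the record above.
import pandas as pd
import torch

inputs = pd.DataFrame({'NumRooms': [3.0, 2.0, 4.0], 'Alley_Pave': [1, 0, 0]})
outputs = pd.Series([127500, 106000, 178100], name='Price')

X = torch.tensor(inputs.values)   # numeric columns become one float64 matrix
y = torch.tensor(outputs.values)
print(X.shape, y.shape)           # torch.Size([3, 2]) torch.Size([3])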
import torch x = torch.tensor(3.0) y = torch.tensor(2.0) print(x + y, x * y, x / y, x**y) x = torch.arange(4) A = torch.arange(20).reshape(5, 4) B = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) X = torch.arange(24).reshape(2, 3, 4) A = torch.arange(20, dtype=torch.float32).reshape(5, 4) B = A.clone() print(A, A + B) a = 2 X = torch.arange(24).reshape(2, 3, 4) print(a + X, (a * X).shape) x = torch.arange(4, dtype=torch.float32) print(x, x.sum()) A.mean() A.sum() / A.numel() y = torch.ones(4, dtype = torch.float32) print(torch.dot(x, y)) torch.sum(x * y) A.shape, x.shape, torch.mv(A, x) B = torch.ones(4, 3) torch.mm(A, B) u = torch.tensor([3.0, -4.0]) torch.norm(u) torch.abs(u).sum() torch.norm(torch.ones((4, 9)))
from mxnet import np, npx npx.set_np() x = np.array(3.0) y = np.array(2.0) print(x + y, x * y, x / y, x**y) x = np.arange(4) A = np.arange(20).reshape(5, 4) B = np.array([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) X = np.arange(24).reshape(2, 3, 4) A = np.arange(20).reshape(5, 4) B = A.copy() print(A, A + B) a = 2 X = np.arange(24).reshape(2, 3, 4) print(a + X, (a * X).shape) x = np.arange(4) print(x, x.sum()) A.mean() A.sum() / A.size y = np.ones(4) print(np.dot(x, y)) np.sum(x * y) A.shape, x.shape, np.dot(A, x) B = np.ones(shape=(4, 3)) np.dot(A, B) u = np.array([3, -4]) np.linalg.norm(u) np.abs(u).sum() np.linalg.norm(np.ones((4, 9)))
98
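The norm calls at the end of both snippets are easy to sanity-check by hand: the L2 norm of [3, -4] is 5, its L1 norm is 7, and the Frobenius norm of a 4x9 matrix of ones is sqrt(36) = 6. A small NumPy check:

# Quick check of the norm examples above.
import numpy as np

u = np.array([3.0, -4.0])
print(np.linalg.norm(u))                 # 5.0
print(np.abs(u).sum())                   # 7.0
print(np.linalg.norm(np.ones((4, 9))))   # 6.0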
%matplotlib inline import numpy as np from matplotlib_inline import backend_inline from d2l import torch as d2l def f(x): return 3 * x ** 2 - 4 * x
%matplotlib inline from matplotlib_inline import backend_inline from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() def f(x): return 3 * x ** 2 - 4 * x
99
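f(x) = 3x**2 - 4x is used in both snippets to illustrate derivatives; a short numerical-limit check (essentially what the surrounding d2l section does) shows the difference quotient at x = 1 approaching the analytic derivative f'(1) = 6*1 - 4 = 2 as h shrinks:

# Numerical-limit sketch for f(x) = 3x**2 - 4x.
def f(x):
    return 3 * x ** 2 - 4 * x

def numerical_lim(f, x, h):
    return (f(x + h) - f(x)) / h

h = 0.1
for _ in range(5):
    print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')
    h *= 0.1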
import torch x = torch.arange(4.0) x.requires_grad_(True) x.grad y = 2 * torch.dot(x, x) y.backward() x.grad x.grad.zero_() y = x.sum() y.backward() x.grad x.grad.zero_() y = x * x y.sum().backward() x.grad x.grad.zero_() y = x * x u = y.detach() z = u * x z.sum().backward() x.grad == u x.grad.zero_() y.sum().backward() x.grad == 2 * x def f(a): b = a * 2 while b.norm() < 1000: b = b * 2 if b.sum() > 0: c = b else: c = 100 * b return c a = torch.randn(size=(), requires_grad=True) d = f(a) d.backward()
from mxnet import autograd, np, npx npx.set_np() x = np.arange(4.0) x.attach_grad() x.grad with autograd.record(): y = 2 * np.dot(x, x) y.backward() x.grad with autograd.record(): y = x.sum() y.backward() x.grad with autograd.record(): y = x * x y.backward() x.grad with autograd.record(): y = x * x u = y.detach() z = u * x z.backward() x.grad == u y.backward() x.grad == 2 * x def f(a): b = a * 2 while np.linalg.norm(b) < 1000: b = b * 2 if b.sum() > 0: c = b else: c = 100 * b return c a = np.random.normal() a.attach_grad() with autograd.record(): d = f(a) d.backward()
100
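The detach example in both autograd snippets is easier to read with the analytic answer in mind: treating u = x*x as a constant gives d(u*x)/dx = u = x**2, while differentiating straight through gives d(x**3)/dx = 3*x**2. A no-autograd check of the two expected gradients:

# What the detach example computes, checked without any autograd framework.
import numpy as np

x = np.arange(4.0)
u = x * x                 # value of y, treated as detached from the graph
grad_detached = u         # gradient of z = u * x when u is held constant
grad_full = 3 * x ** 2    # gradient of x**3 when u is not detached
print(grad_detached)      # [0. 1. 4. 9.]
print(grad_full)          # [ 0.  3. 12. 27.]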
%matplotlib inline import torch from torch.distributions import multinomial from d2l import torch as d2l fair_probs = torch.ones([6]) / 6 multinomial.Multinomial(1, fair_probs).sample() multinomial.Multinomial(10, fair_probs).sample() counts = multinomial.Multinomial(1000, fair_probs).sample()
%matplotlib inline import random from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() fair_probs = [1.0 / 6] * 6 np.random.multinomial(1, fair_probs) np.random.multinomial(10, fair_probs) counts = np.random.multinomial(1000, fair_probs).astype(np.float32)