Columns: id (int64, ranging from 1 to 564); tensorflow (string, 52 distinct values); pytorch (string, 81 distinct values); mxnet (string, 66 distinct values); paddle (string, 73 distinct values).
Each record below lists its fields in that order (id, tensorflow, pytorch, mxnet, paddle); "null" marks a missing cell, and each non-null code cell is flattened onto a single line.
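A minimal sketch of reading this dump back into a table, assuming it has been exported to a CSV file with the five columns above (the file name d2l_code_cells.csv and the use of pandas are illustrative assumptions, not part of the dataset):

import pandas as pd
# Hypothetical export of the records below; columns follow the schema listed above.
df = pd.read_csv('d2l_code_cells.csv')
row = df[df['id'] == 401].iloc[0]      # one record, e.g. id 401
print(row['pytorch'][:80])             # start of the flattened PyTorch cell
print(pd.isna(row['tensorflow']))      # True when a framework cell is null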
401
null
import torch from torch import nn from torch.nn import functional as F x = torch.arange(4) torch.save(x, 'x-file') x2 = torch.load('x-file') y = torch.zeros(4) torch.save([x, y],'x-files') x2, y2 = torch.load('x-files') mydict = {'x': x, 'y': y} torch.save(mydict, 'mydict') mydict2 = torch.load('mydict') class MLP(nn.Module): def __init__(self): super().__init__() self.hidden = nn.Linear(20, 256) self.output = nn.Linear(256, 10) def forward(self, x): return self.output(F.relu(self.hidden(x))) net = MLP() X = torch.randn(size=(2, 20)) Y = net(X) torch.save(net.state_dict(), 'mlp.params') clone = MLP() clone.load_state_dict(torch.load('mlp.params')) clone.eval()
from mxnet import np, npx from mxnet.gluon import nn npx.set_np() x = np.arange(4) npx.save('x-file', x) x2 = npx.load('x-file') y = np.zeros(4) npx.save('x-files', [x, y]) x2, y2 = npx.load('x-files') mydict = {'x': x, 'y': y} npx.save('mydict', mydict) mydict2 = npx.load('mydict') class MLP(nn.Block): def __init__(self, **kwargs): super(MLP, self).__init__(**kwargs) self.hidden = nn.Dense(256, activation='relu') self.output = nn.Dense(10) def forward(self, x): return self.output(self.hidden(x)) net = MLP() net.initialize() X = np.random.uniform(size=(2, 20)) Y = net(X) net.save_parameters('mlp.params') clone = MLP() clone.load_parameters('mlp.params')
null
402
null
import torch from torch import nn torch.device('cpu'), torch.device('cuda'), torch.device('cuda:1') torch.cuda.device_count() def try_gpu(i=0): if torch.cuda.device_count() >= i + 1: return torch.device(f'cuda:{i}') return torch.device('cpu') def try_all_gpus(): devices = [torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())] return devices if devices else [torch.device('cpu')] try_gpu(), try_gpu(10), try_all_gpus() x = torch.tensor([1, 2, 3]) x.device X = torch.ones(2, 3, device=try_gpu()) Y = torch.rand(2, 3, device=try_gpu(1)) Z = X.cuda(1) Z.cuda(1) is Z net = nn.Sequential(nn.Linear(3, 1)) net = net.to(device=try_gpu()) net[0].weight.data.device
from mxnet import np, npx from mxnet.gluon import nn npx.set_np() npx.cpu(), npx.gpu(), npx.gpu(1) npx.num_gpus() def try_gpu(i=0): return npx.gpu(i) if npx.num_gpus() >= i + 1 else npx.cpu() def try_all_gpus(): devices = [npx.gpu(i) for i in range(npx.num_gpus())] return devices if devices else [npx.cpu()] try_gpu(), try_gpu(10), try_all_gpus() x = np.array([1, 2, 3]) x.ctx X = np.ones((2, 3), ctx=try_gpu()) Y = np.random.uniform(size=(2, 3), ctx=try_gpu(1)) Z = X.copyto(try_gpu(1)) Z.as_in_ctx(try_gpu(1)) is Z net = nn.Sequential() net.add(nn.Dense(1)) net.initialize(ctx=try_gpu()) net[0].weight.data().ctx
null
403
null
import torch from torch import nn from d2l import torch as d2l def corr2d(X, K): h, w = K.shape Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): Y[i, j] = (X[i:i + h, j:j + w] * K).sum() return Y X = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) K = torch.tensor([[0.0, 1.0], [2.0, 3.0]]) corr2d(X, K) class Conv2D(nn.Module): def __init__(self, kernel_size): super().__init__() self.weight = nn.Parameter(torch.rand(kernel_size)) self.bias = nn.Parameter(torch.zeros(1)) def forward(self, x): return corr2d(x, self.weight) + self.bias X = torch.ones((6, 8)) X[:, 2:6] = 0 K = torch.tensor([[1.0, -1.0]]) Y = corr2d(X, K) corr2d(X.t(), K) conv2d = nn.Conv2d(1, 1, kernel_size=(1, 2), bias=False) X = X.reshape((1, 1, 6, 8)) Y = Y.reshape((1, 1, 6, 7)) lr = 3e-2 for i in range(10): Y_hat = conv2d(X) l = (Y_hat - Y) ** 2 conv2d.zero_grad() l.sum().backward() conv2d.weight.data[:] -= lr * conv2d.weight.grad conv2d.weight.data.reshape((1, 2))
from mxnet import autograd, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def corr2d(X, K): h, w = K.shape Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): Y[i, j] = (X[i:i + h, j:j + w] * K).sum() return Y X = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) K = np.array([[0.0, 1.0], [2.0, 3.0]]) corr2d(X, K) class Conv2D(nn.Block): def __init__(self, kernel_size, **kwargs): super().__init__(**kwargs) self.weight = self.params.get('weight', shape=kernel_size) self.bias = self.params.get('bias', shape=(1,)) def forward(self, x): return corr2d(x, self.weight.data()) + self.bias.data() X = np.ones((6, 8)) X[:, 2:6] = 0 K = np.array([[1.0, -1.0]]) Y = corr2d(X, K) corr2d(d2l.transpose(X), K) conv2d = nn.Conv2D(1, kernel_size=(1, 2), use_bias=False) conv2d.initialize() X = X.reshape(1, 1, 6, 8) Y = Y.reshape(1, 1, 6, 7) lr = 3e-2 for i in range(10): with autograd.record(): Y_hat = conv2d(X) l = (Y_hat - Y) ** 2 l.backward() conv2d.weight.data()[:] -= lr * conv2d.weight.grad() conv2d.weight.data().reshape((1, 2))
null
404
null
import torch from torch import nn def comp_conv2d(conv2d, X): X = X.reshape((1, 1) + X.shape) Y = conv2d(X) return Y.reshape(Y.shape[2:]) conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1) X = torch.rand(size=(8, 8)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2d(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4)) comp_conv2d(conv2d, X).shape
from mxnet import np, npx from mxnet.gluon import nn npx.set_np() def comp_conv2d(conv2d, X): conv2d.initialize() X = X.reshape((1, 1) + X.shape) Y = conv2d(X) return Y.reshape(Y.shape[2:]) conv2d = nn.Conv2D(1, kernel_size=3, padding=1) X = np.random.uniform(size=(8, 8)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(1, kernel_size=(5, 3), padding=(2, 1)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(1, kernel_size=3, padding=1, strides=2) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(1, kernel_size=(3, 5), padding=(0, 1), strides=(3, 4)) comp_conv2d(conv2d, X).shape
null
405
null
import torch from d2l import torch as d2l def corr2d_multi_in(X, K): return sum(d2l.corr2d(x, k) for x, k in zip(X, K)) X = torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]) K = torch.tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]]) corr2d_multi_in(X, K) def corr2d_multi_in_out(X, K): return torch.stack([corr2d_multi_in(X, k) for k in K], 0) K = torch.stack((K, K + 1, K + 2), 0) K.shape def corr2d_multi_in_out_1x1(X, K): c_i, h, w = X.shape c_o = K.shape[0] X = X.reshape((c_i, h * w)) K = K.reshape((c_o, c_i)) Y = torch.matmul(K, X) return Y.reshape((c_o, h, w)) X = torch.normal(0, 1, (3, 3, 3)) K = torch.normal(0, 1, (2, 3, 1, 1)) Y1 = corr2d_multi_in_out_1x1(X, K) Y2 = corr2d_multi_in_out(X, K) assert float(torch.abs(Y1 - Y2).sum()) < 1e-6
from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() def corr2d_multi_in(X, K): return sum(d2l.corr2d(x, k) for x, k in zip(X, K)) X = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]) K = np.array([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]]) corr2d_multi_in(X, K) def corr2d_multi_in_out(X, K): return np.stack([corr2d_multi_in(X, k) for k in K], 0) K = np.stack((K, K + 1, K + 2), 0) K.shape def corr2d_multi_in_out_1x1(X, K): c_i, h, w = X.shape c_o = K.shape[0] X = X.reshape((c_i, h * w)) K = K.reshape((c_o, c_i)) Y = np.dot(K, X) return Y.reshape((c_o, h, w)) X = np.random.normal(0, 1, (3, 3, 3)) K = np.random.normal(0, 1, (2, 3, 1, 1)) Y1 = corr2d_multi_in_out_1x1(X, K) Y2 = corr2d_multi_in_out(X, K) assert float(np.abs(Y1 - Y2).sum()) < 1e-6
null
406
null
import torch from torch import nn from d2l import torch as d2l def pool2d(X, pool_size, mode='max'): p_h, p_w = pool_size Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): if mode == 'max': Y[i, j] = X[i: i + p_h, j: j + p_w].max() elif mode == 'avg': Y[i, j] = X[i: i + p_h, j: j + p_w].mean() return Y X = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) pool2d(X, (2, 2)) X = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4)) pool2d = nn.MaxPool2d(3) pool2d(X) pool2d = nn.MaxPool2d(3, padding=1, stride=2) pool2d(X) pool2d = nn.MaxPool2d((2, 3), stride=(2, 3), padding=(0, 1)) pool2d(X) X = torch.cat((X, X + 1), 1) pool2d = nn.MaxPool2d(3, padding=1, stride=2) pool2d(X)
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def pool2d(X, pool_size, mode='max'): p_h, p_w = pool_size Y = np.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): if mode == 'max': Y[i, j] = X[i: i + p_h, j: j + p_w].max() elif mode == 'avg': Y[i, j] = X[i: i + p_h, j: j + p_w].mean() return Y X = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) pool2d(X, (2, 2)) X = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4)) pool2d = nn.MaxPool2D(3) pool2d(X) pool2d = nn.MaxPool2D(3, padding=1, strides=2) pool2d(X) pool2d = nn.MaxPool2D((2, 3), padding=(0, 1), strides=(2, 3)) pool2d(X) X = np.concatenate((X, X + 1), 1) pool2d = nn.MaxPool2D(3, padding=1, strides=2) pool2d(X)
null
407
null
import torch from torch import nn from d2l import torch as d2l net = nn.Sequential( nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(), nn.Linear(120, 84), nn.Sigmoid(), nn.Linear(84, 10)) X = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ',X.shape) def train_ch6(net, train_iter, test_iter, num_epochs, lr, device): def init_weights(m): if type(m) == nn.Linear or type(m) == nn.Conv2d: nn.init.xavier_uniform_(m.weight) net.apply(init_weights) net.to(device) optimizer = torch.optim.SGD(net.parameters(), lr=lr) loss = nn.CrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc']) timer, num_batches = d2l.Timer(), len(train_iter) for epoch in range(num_epochs): metric = d2l.Accumulator(3) net.train() for i, (X, y) in enumerate(train_iter): timer.start() optimizer.zero_grad() X, y = X.to(device), y.to(device) y_hat = net(X) l = loss(y_hat, y) l.backward() optimizer.step() with torch.no_grad(): metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0]) timer.stop() train_l = metric[0] / metric[2] train_acc = metric[1] / metric[2] if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1: animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None)) test_acc = evaluate_accuracy_gpu(net, test_iter) animator.add(epoch + 1, (None, None, test_acc))
from mxnet import autograd, gluon, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() net = nn.Sequential() net.add(nn.Conv2D(channels=6, kernel_size=5, padding=2, activation='sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Dense(120, activation='sigmoid'), nn.Dense(84, activation='sigmoid'), nn.Dense(10)) X = np.random.uniform(size=(1, 1, 28, 28)) net.initialize() for layer in net: X = layer(X) print(layer.name, 'output shape: ', X.shape) def train_ch6(net, train_iter, test_iter, num_epochs, lr, device): net.initialize(force_reinit=True, ctx=device, init=init.Xavier()) loss = gluon.loss.SoftmaxCrossEntropyLoss() trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr}) animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc']) timer, num_batches = d2l.Timer(), len(train_iter) for epoch in range(num_epochs): metric = d2l.Accumulator(3) for i, (X, y) in enumerate(train_iter): timer.start() X, y = X.as_in_ctx(device), y.as_in_ctx(device) with autograd.record(): y_hat = net(X) l = loss(y_hat, y) l.backward() trainer.step(X.shape[0]) metric.add(l.sum(), d2l.accuracy(y_hat, y), X.shape[0]) timer.stop() train_l = metric[0] / metric[2] train_acc = metric[1] / metric[2] if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1: animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None)) test_acc = evaluate_accuracy_gpu(net, test_iter) animator.add(epoch + 1, (None, None, test_acc))
null
408
null
import torch from torch import nn from d2l import torch as d2l net = nn.Sequential( nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2), nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2), nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2), nn.Flatten(), nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, 10)) X = torch.randn(1, 1, 224, 224) for layer in net: X=layer(X) print(layer.__class__.__name__,'output shape: ',X.shape)
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() net = nn.Sequential() net.add( nn.Conv2D(96, kernel_size=11, strides=4, activation='relu'), nn.MaxPool2D(pool_size=3, strides=2), nn.Conv2D(256, kernel_size=5, padding=2, activation='relu'), nn.MaxPool2D(pool_size=3, strides=2), nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'), nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'), nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'), nn.MaxPool2D(pool_size=3, strides=2), nn.Dense(4096, activation='relu'), nn.Dropout(0.5), nn.Dense(4096, activation='relu'), nn.Dropout(0.5), nn.Dense(10)) X = np.random.uniform(size=(1, 1, 224, 224)) net.initialize() for layer in net: X = layer(X) print(layer.name, 'output shape: ', X.shape)
null
409
null
import torch from torch import nn from d2l import torch as d2l def vgg_block(num_convs, in_channels, out_channels): layers = [] for _ in range(num_convs): layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)) layers.append(nn.ReLU()) in_channels = out_channels layers.append(nn.MaxPool2d(kernel_size=2, stride=2)) return nn.Sequential(*layers) def vgg(conv_arch): conv_blks = [] in_channels = 1 for (num_convs, out_channels) in conv_arch: conv_blks.append(vgg_block(num_convs, in_channels, out_channels)) in_channels = out_channels return nn.Sequential( *conv_blks, nn.Flatten(), nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5), nn.Linear(4096, 10)) conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512)) net = vgg(conv_arch) X = torch.randn(size=(1, 1, 224, 224)) for blk in net: X = blk(X) print(blk.__class__.__name__,'output shape: ',X.shape)
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def vgg_block(num_convs, num_channels): blk = nn.Sequential() for _ in range(num_convs): blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1, activation='relu')) blk.add(nn.MaxPool2D(pool_size=2, strides=2)) return blk def vgg(conv_arch): net = nn.Sequential() for (num_convs, num_channels) in conv_arch: net.add(vgg_block(num_convs, num_channels)) net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5), nn.Dense(4096, activation='relu'), nn.Dropout(0.5), nn.Dense(10)) return net conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512)) net = vgg(conv_arch) net.initialize() X = np.random.uniform(size=(1, 1, 224, 224)) for blk in net: X = blk(X) print(blk.name, 'output shape: ', X.shape)
null
410
null
import torch from torch import nn from d2l import torch as d2l def nin_block(in_channels, out_channels, kernel_size, strides, padding): return nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding), nn.ReLU(), nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(), nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU()) net = nn.Sequential( nin_block(1, 96, kernel_size=11, strides=4, padding=0), nn.MaxPool2d(3, stride=2), nin_block(96, 256, kernel_size=5, strides=1, padding=2), nn.MaxPool2d(3, stride=2), nin_block(256, 384, kernel_size=3, strides=1, padding=1), nn.MaxPool2d(3, stride=2), nn.Dropout(0.5), nin_block(384, 10, kernel_size=3, strides=1, padding=1), nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten()) X = torch.rand(size=(1, 1, 224, 224)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def nin_block(num_channels, kernel_size, strides, padding): blk = nn.Sequential() blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'), nn.Conv2D(num_channels, kernel_size=1, activation='relu'), nn.Conv2D(num_channels, kernel_size=1, activation='relu')) return blk net = nn.Sequential() net.add(nin_block(96, kernel_size=11, strides=4, padding=0), nn.MaxPool2D(pool_size=3, strides=2), nin_block(256, kernel_size=5, strides=1, padding=2), nn.MaxPool2D(pool_size=3, strides=2), nin_block(384, kernel_size=3, strides=1, padding=1), nn.MaxPool2D(pool_size=3, strides=2), nn.Dropout(0.5), nin_block(10, kernel_size=3, strides=1, padding=1), nn.GlobalAvgPool2D(), nn.Flatten()) X = np.random.uniform(size=(1, 1, 224, 224)) net.initialize() for layer in net: X = layer(X) print(layer.name, 'output shape: ', X.shape)
null
411
null
import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l class Inception(nn.Module): def __init__(self, in_channels, c1, c2, c3, c4, **kwargs): super(Inception, self).__init__(**kwargs) self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1) self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1) self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1) self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1) self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2) self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1) def forward(self, x): p1 = F.relu(self.p1_1(x)) p2 = F.relu(self.p2_2(F.relu(self.p2_1(x)))) p3 = F.relu(self.p3_2(F.relu(self.p3_1(x)))) p4 = F.relu(self.p4_2(self.p4_1(x))) return torch.cat((p1, p2, p3, p4), dim=1) b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) b2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1), nn.ReLU(), nn.Conv2d(64, 192, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32), Inception(256, 128, (128, 192), (32, 96), 64), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64), Inception(512, 160, (112, 224), (24, 64), 64), Inception(512, 128, (128, 256), (24, 64), 64), Inception(512, 112, (144, 288), (32, 64), 64), Inception(528, 256, (160, 320), (32, 128), 128), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128), Inception(832, 384, (192, 384), (48, 128), 128), nn.AdaptiveAvgPool2d((1,1)), nn.Flatten()) net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10)) X = torch.rand(size=(1, 1, 96, 96)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() class Inception(nn.Block): def __init__(self, c1, c2, c3, c4, **kwargs): super(Inception, self).__init__(**kwargs) self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu') self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu') self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1, activation='relu') self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu') self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2, activation='relu') self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1) self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu') def forward(self, x): p1 = self.p1_1(x) p2 = self.p2_2(self.p2_1(x)) p3 = self.p3_2(self.p3_1(x)) p4 = self.p4_2(self.p4_1(x)) return np.concatenate((p1, p2, p3, p4), axis=1) b1 = nn.Sequential() b1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) b2 = nn.Sequential() b2.add(nn.Conv2D(64, kernel_size=1, activation='relu'), nn.Conv2D(192, kernel_size=3, padding=1, activation='relu'), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) b3 = nn.Sequential() b3.add(Inception(64, (96, 128), (16, 32), 32), Inception(128, (128, 192), (32, 96), 64), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) b4 = nn.Sequential() b4.add(Inception(192, (96, 208), (16, 48), 64), Inception(160, (112, 224), (24, 64), 64), Inception(128, (128, 256), (24, 64), 64), Inception(112, (144, 288), (32, 64), 64), Inception(256, (160, 320), (32, 128), 128), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) b5 = nn.Sequential() b5.add(Inception(256, (160, 320), (32, 128), 128), Inception(384, (192, 384), (48, 128), 128), nn.GlobalAvgPool2D()) net = nn.Sequential() net.add(b1, b2, b3, b4, b5, nn.Dense(10)) X = np.random.uniform(size=(1, 1, 96, 96)) net.initialize() for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
null
412
null
import torch from torch import nn from d2l import torch as d2l def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum): if not torch.is_grad_enabled(): X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps) else: assert len(X.shape) in (2, 4) if len(X.shape) == 2: mean = X.mean(dim=0) var = ((X - mean) ** 2).mean(dim=0) else: mean = X.mean(dim=(0, 2, 3), keepdim=True) var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True) X_hat = (X - mean) / torch.sqrt(var + eps) moving_mean = momentum * moving_mean + (1.0 - momentum) * mean moving_var = momentum * moving_var + (1.0 - momentum) * var Y = gamma * X_hat + beta return Y, moving_mean.data, moving_var.data class BatchNorm(nn.Module): def __init__(self, num_features, num_dims): super().__init__() if num_dims == 2: shape = (1, num_features) else: shape = (1, num_features, 1, 1) self.gamma = nn.Parameter(torch.ones(shape)) self.beta = nn.Parameter(torch.zeros(shape)) self.moving_mean = torch.zeros(shape) self.moving_var = torch.ones(shape) def forward(self, X): if self.moving_mean.device != X.device: self.moving_mean = self.moving_mean.to(X.device) self.moving_var = self.moving_var.to(X.device) Y, self.moving_mean, self.moving_var = batch_norm( X, self.gamma, self.beta, self.moving_mean, self.moving_var, eps=1e-5, momentum=0.9) return Y net = nn.Sequential( nn.Conv2d(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Conv2d(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(16*4*4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(), nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(), nn.Linear(84, 10)) net[1].gamma.reshape((-1,)), net[1].beta.reshape((-1,)) net = nn.Sequential( nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(), nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(), nn.Linear(84, 10))
from mxnet import autograd, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum): if not autograd.is_training(): X_hat = (X - moving_mean) / np.sqrt(moving_var + eps) else: assert len(X.shape) in (2, 4) if len(X.shape) == 2: mean = X.mean(axis=0) var = ((X - mean) ** 2).mean(axis=0) else: mean = X.mean(axis=(0, 2, 3), keepdims=True) var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True) X_hat = (X - mean) / np.sqrt(var + eps) moving_mean = momentum * moving_mean + (1.0 - momentum) * mean moving_var = momentum * moving_var + (1.0 - momentum) * var Y = gamma * X_hat + beta return Y, moving_mean, moving_var class BatchNorm(nn.Block): def __init__(self, num_features, num_dims, **kwargs): super().__init__(**kwargs) if num_dims == 2: shape = (1, num_features) else: shape = (1, num_features, 1, 1) self.gamma = self.params.get('gamma', shape=shape, init=init.One()) self.beta = self.params.get('beta', shape=shape, init=init.Zero()) self.moving_mean = np.zeros(shape) self.moving_var = np.ones(shape) def forward(self, X): if self.moving_mean.ctx != X.ctx: self.moving_mean = self.moving_mean.copyto(X.ctx) self.moving_var = self.moving_var.copyto(X.ctx) Y, self.moving_mean, self.moving_var = batch_norm( X, self.gamma.data(), self.beta.data(), self.moving_mean, self.moving_var, eps=1e-12, momentum=0.9) return Y net = nn.Sequential() net.add(nn.Conv2D(6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Activation('sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Conv2D(16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Activation('sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Dense(120), BatchNorm(120, num_dims=2), nn.Activation('sigmoid'), nn.Dense(84), BatchNorm(84, num_dims=2), nn.Activation('sigmoid'), nn.Dense(10)) net[1].gamma.data().reshape(-1,), net[1].beta.data().reshape(-1,) net = nn.Sequential() net.add(nn.Conv2D(6, kernel_size=5), nn.BatchNorm(), nn.Activation('sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Conv2D(16, kernel_size=5), nn.BatchNorm(), nn.Activation('sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Dense(120), nn.BatchNorm(), nn.Activation('sigmoid'), nn.Dense(84), nn.BatchNorm(), nn.Activation('sigmoid'), nn.Dense(10))
null
413
null
import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l class Residual(nn.Module): def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1): super().__init__() self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides) self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1) if use_1x1conv: self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides) else: self.conv3 = None self.bn1 = nn.BatchNorm2d(num_channels) self.bn2 = nn.BatchNorm2d(num_channels) def forward(self, X): Y = F.relu(self.bn1(self.conv1(X))) Y = self.bn2(self.conv2(Y)) if self.conv3: X = self.conv3(X) Y += X return F.relu(Y) blk = Residual(3,3) X = torch.rand(4, 3, 6, 6) Y = blk(X) Y.shape blk = Residual(3,6, use_1x1conv=True, strides=2) blk(X).shape b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3), nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) def resnet_block(input_channels, num_channels, num_residuals, first_block=False): blk = [] for i in range(num_residuals): if i == 0 and not first_block: blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2)) else: blk.append(Residual(num_channels, num_channels)) return blk b2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True)) b3 = nn.Sequential(*resnet_block(64, 128, 2)) b4 = nn.Sequential(*resnet_block(128, 256, 2)) b5 = nn.Sequential(*resnet_block(256, 512, 2)) net = nn.Sequential(b1, b2, b3, b4, b5, nn.AdaptiveAvgPool2d((1,1)), nn.Flatten(), nn.Linear(512, 10)) X = torch.rand(size=(1, 1, 224, 224)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() class Residual(nn.Block): def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs): super().__init__(**kwargs) self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides) self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1) if use_1x1conv: self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides) else: self.conv3 = None self.bn1 = nn.BatchNorm() self.bn2 = nn.BatchNorm() def forward(self, X): Y = npx.relu(self.bn1(self.conv1(X))) Y = self.bn2(self.conv2(Y)) if self.conv3: X = self.conv3(X) return npx.relu(Y + X) blk = Residual(3) blk.initialize() X = np.random.uniform(size=(4, 3, 6, 6)) blk(X).shape blk = Residual(6, use_1x1conv=True, strides=2) blk.initialize() blk(X).shape net = nn.Sequential() net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3), nn.BatchNorm(), nn.Activation('relu'), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) def resnet_block(num_channels, num_residuals, first_block=False): blk = nn.Sequential() for i in range(num_residuals): if i == 0 and not first_block: blk.add(Residual(num_channels, use_1x1conv=True, strides=2)) else: blk.add(Residual(num_channels)) return blk net.add(resnet_block(64, 2, first_block=True), resnet_block(128, 2), resnet_block(256, 2), resnet_block(512, 2)) net.add(nn.GlobalAvgPool2D(), nn.Dense(10)) X = np.random.uniform(size=(1, 1, 224, 224)) net.initialize() for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
null
414
null
import torch from torch import nn from d2l import torch as d2l def conv_block(input_channels, num_channels): return nn.Sequential( nn.BatchNorm2d(input_channels), nn.ReLU(), nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1)) class DenseBlock(nn.Module): def __init__(self, num_convs, input_channels, num_channels): super(DenseBlock, self).__init__() layer = [] for i in range(num_convs): layer.append(conv_block(num_channels * i + input_channels, num_channels)) self.net = nn.Sequential(*layer) def forward(self, X): for blk in self.net: Y = blk(X) X = torch.cat((X, Y), dim=1) return X blk = DenseBlock(2, 3, 10) X = torch.randn(4, 3, 8, 8) Y = blk(X) Y.shape def transition_block(input_channels, num_channels): return nn.Sequential( nn.BatchNorm2d(input_channels), nn.ReLU(), nn.Conv2d(input_channels, num_channels, kernel_size=1), nn.AvgPool2d(kernel_size=2, stride=2)) blk = transition_block(23, 10) blk(Y).shape b1 = nn.Sequential( nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3), nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) num_channels, growth_rate = 64, 32 num_convs_in_dense_blocks = [4, 4, 4, 4] blks = [] for i, num_convs in enumerate(num_convs_in_dense_blocks): blks.append(DenseBlock(num_convs, num_channels, growth_rate)) num_channels += num_convs * growth_rate if i != len(num_convs_in_dense_blocks) - 1: blks.append(transition_block(num_channels, num_channels // 2)) num_channels = num_channels // 2 net = nn.Sequential( b1, *blks, nn.BatchNorm2d(num_channels), nn.ReLU(), nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten(), nn.Linear(num_channels, 10))
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def conv_block(num_channels): blk = nn.Sequential() blk.add(nn.BatchNorm(), nn.Activation('relu'), nn.Conv2D(num_channels, kernel_size=3, padding=1)) return blk class DenseBlock(nn.Block): def __init__(self, num_convs, num_channels, **kwargs): super().__init__(**kwargs) self.net = nn.Sequential() for _ in range(num_convs): self.net.add(conv_block(num_channels)) def forward(self, X): for blk in self.net: Y = blk(X) X = np.concatenate((X, Y), axis=1) return X blk = DenseBlock(2, 10) blk.initialize() X = np.random.uniform(size=(4, 3, 8, 8)) Y = blk(X) Y.shape def transition_block(num_channels): blk = nn.Sequential() blk.add(nn.BatchNorm(), nn.Activation('relu'), nn.Conv2D(num_channels, kernel_size=1), nn.AvgPool2D(pool_size=2, strides=2)) return blk blk = transition_block(10) blk.initialize() blk(Y).shape net = nn.Sequential() net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3), nn.BatchNorm(), nn.Activation('relu'), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) num_channels, growth_rate = 64, 32 num_convs_in_dense_blocks = [4, 4, 4, 4] for i, num_convs in enumerate(num_convs_in_dense_blocks): net.add(DenseBlock(num_convs, growth_rate)) num_channels += num_convs * growth_rate if i != len(num_convs_in_dense_blocks) - 1: num_channels //= 2 net.add(transition_block(num_channels)) net.add(nn.BatchNorm(), nn.Activation('relu'), nn.GlobalAvgPool2D(), nn.Dense(10))
null
415
null
%matplotlib inline import torch from torch import nn from d2l import torch as d2l T = 1000 time = torch.arange(1, T + 1, dtype=torch.float32) x = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,)) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) tau = 4 features = torch.zeros((T - tau, tau)) for i in range(tau): features[:, i] = x[i: T - tau + i] labels = x[tau:].reshape((-1, 1)) batch_size, n_train = 16, 600 train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True) def init_weights(m): if type(m) == nn.Linear: nn.init.xavier_uniform_(m.weight) def get_net(): net = nn.Sequential(nn.Linear(4, 10), nn.ReLU(), nn.Linear(10, 1)) net.apply(init_weights) return net loss = nn.MSELoss(reduction='none') def train(net, train_iter, loss, epochs, lr): trainer = torch.optim.Adam(net.parameters(), lr) for epoch in range(epochs): for X, y in train_iter: trainer.zero_grad() l = loss(net(X), y) l.sum().backward() trainer.step() net = get_net() train(net, train_iter, loss, 5, 0.01) onestep_preds = net(features) d2l.plot([time, time[tau:]], [x.detach().numpy(), onestep_preds.detach().numpy()], 'time', 'x', legend=['data', '1-step preds'], xlim=[1, 1000], figsize=(6, 3)) multistep_preds = torch.zeros(T) multistep_preds[: n_train + tau] = x[: n_train + tau] for i in range(n_train + tau, T): multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1))) d2l.plot([time, time[tau:], time[n_train + tau:]], [x.detach().numpy(), onestep_preds.detach().numpy(), multistep_preds[n_train + tau:].detach().numpy()], 'time', 'x', legend=['data', '1-step preds', 'multistep preds'], xlim=[1, 1000], figsize=(6, 3)) max_steps = 64 features = torch.zeros((T - tau - max_steps + 1, tau + max_steps)) for i in range(tau): features[:, i] = x[i: i + T - tau - max_steps + 1] for i in range(tau, tau + max_steps): features[:, i] = net(features[:, i - tau:i]).reshape(-1) steps = (1, 4, 16, 64) d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps], [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x', legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000], figsize=(6, 3))
%matplotlib inline from mxnet import autograd, gluon, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() T = 1000 time = np.arange(1, T + 1, dtype=np.float32) x = np.sin(0.01 * time) + np.random.normal(0, 0.2, (T,)) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) tau = 4 features = np.zeros((T - tau, tau)) for i in range(tau): features[:, i] = x[i: T - tau + i] labels = x[tau:].reshape((-1, 1)) batch_size, n_train = 16, 600 train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True) def get_net(): net = nn.Sequential() net.add(nn.Dense(10, activation='relu'), nn.Dense(1)) net.initialize(init.Xavier()) return net loss = gluon.loss.L2Loss() def train(net, train_iter, loss, epochs, lr): trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr}) for epoch in range(epochs): for X, y in train_iter: with autograd.record(): l = loss(net(X), y) l.backward() trainer.step(batch_size) net = get_net() train(net, train_iter, loss, 5, 0.01) onestep_preds = net(features) d2l.plot([time, time[tau:]], [x.asnumpy(), onestep_preds.asnumpy()], 'time', 'x', legend=['data', '1-step preds'], xlim=[1, 1000], figsize=(6, 3)) multistep_preds = np.zeros(T) multistep_preds[: n_train + tau] = x[: n_train + tau] for i in range(n_train + tau, T): multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1))) d2l.plot([time, time[tau:], time[n_train + tau:]], [x.asnumpy(), onestep_preds.asnumpy(), multistep_preds[n_train + tau:].asnumpy()], 'time', 'x', legend=['data', '1-step preds', 'multistep preds'], xlim=[1, 1000], figsize=(6, 3)) max_steps = 64 features = np.zeros((T - tau - max_steps + 1, tau + max_steps)) for i in range(tau): features[:, i] = x[i: i + T - tau - max_steps + 1] for i in range(tau, tau + max_steps): features[:, i] = net(features[:, i - tau:i]).reshape(-1) steps = (1, 4, 16, 64) d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps], [features[:, tau + i - 1].asnumpy() for i in steps], 'time', 'x', legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000], figsize=(6, 3))
null
416
null
import collections import re from d2l import torch as d2l
import collections import re from d2l import mxnet as d2l
null
417
null
import random import torch from d2l import torch as d2l tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): corpus = corpus[random.randint(0, num_steps - 1):] num_subseqs = (len(corpus) - 1) // num_steps initial_indices = list(range(0, num_subseqs * num_steps, num_steps)) random.shuffle(initial_indices) def data(pos): return corpus[pos: pos + num_steps] num_batches = num_subseqs // batch_size for i in range(0, batch_size * num_batches, batch_size): initial_indices_per_batch = initial_indices[i: i + batch_size] X = [data(j) for j in initial_indices_per_batch] Y = [data(j + 1) for j in initial_indices_per_batch] yield torch.tensor(X), torch.tensor(Y) def seq_data_iter_sequential(corpus, batch_size, num_steps): offset = random.randint(0, num_steps) num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size Xs = torch.tensor(corpus[offset: offset + num_tokens]) Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens]) Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1) num_batches = Xs.shape[1] // num_steps for i in range(0, num_steps * num_batches, num_steps): X = Xs[:, i: i + num_steps] Y = Ys[:, i: i + num_steps] yield X, Y
import random from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): corpus = corpus[random.randint(0, num_steps - 1):] num_subseqs = (len(corpus) - 1) // num_steps initial_indices = list(range(0, num_subseqs * num_steps, num_steps)) random.shuffle(initial_indices) def data(pos): return corpus[pos: pos + num_steps] num_batches = num_subseqs // batch_size for i in range(0, batch_size * num_batches, batch_size): initial_indices_per_batch = initial_indices[i: i + batch_size] X = [data(j) for j in initial_indices_per_batch] Y = [data(j + 1) for j in initial_indices_per_batch] yield np.array(X), np.array(Y) def seq_data_iter_sequential(corpus, batch_size, num_steps): offset = random.randint(0, num_steps) num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size Xs = np.array(corpus[offset: offset + num_tokens]) Ys = np.array(corpus[offset + 1: offset + 1 + num_tokens]) Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1) num_batches = Xs.shape[1] // num_steps for i in range(0, num_steps * num_batches, num_steps): X = Xs[:, i: i + num_steps] Y = Ys[:, i: i + num_steps] yield X, Y
null
418
null
import torch from d2l import torch as d2l X, W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4)) H, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4)) torch.matmul(X, W_xh) + torch.matmul(H, W_hh) torch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))
from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() X, W_xh = np.random.normal(0, 1, (3, 1)), np.random.normal(0, 1, (1, 4)) H, W_hh = np.random.normal(0, 1, (3, 4)), np.random.normal(0, 1, (4, 4)) np.dot(X, W_xh) + np.dot(H, W_hh) np.dot(np.concatenate((X, H), 1), np.concatenate((W_xh, W_hh), 0))
null
419
null
%matplotlib inline import math import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) F.one_hot(torch.tensor([0, 2]), len(vocab)) X = torch.arange(10).reshape((2, 5)) F.one_hot(X.T, 28).shape def get_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return torch.randn(size=shape, device=device) * 0.01 W_xh = normal((num_inputs, num_hiddens)) W_hh = normal((num_hiddens, num_hiddens)) b_h = torch.zeros(num_hiddens, device=device) W_hq = normal((num_hiddens, num_outputs)) b_q = torch.zeros(num_outputs, device=device) params = [W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.requires_grad_(True) return params def init_rnn_state(batch_size, num_hiddens, device): return (torch.zeros((batch_size, num_hiddens), device=device), ) def rnn(inputs, state, params): W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h) Y = torch.mm(H, W_hq) + b_q outputs.append(Y) return torch.cat(outputs, dim=0), (H,) class RNNModelScratch: def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn): self.vocab_size, self.num_hiddens = vocab_size, num_hiddens self.params = get_params(vocab_size, num_hiddens, device) self.init_state, self.forward_fn = init_state, forward_fn def __call__(self, X, state): X = F.one_hot(X.T, self.vocab_size).type(torch.float32) return self.forward_fn(X, state, self.params) def begin_state(self, batch_size, device): return self.init_state(batch_size, self.num_hiddens, device) num_hiddens = 512 net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn) state = net.begin_state(X.shape[0], d2l.try_gpu()) Y, new_state = net(X.to(d2l.try_gpu()), state) Y.shape, len(new_state), new_state[0].shape def predict_ch8(prefix, num_preds, net, vocab, device): state = net.begin_state(batch_size=1, device=device) outputs = [vocab[prefix[0]]] get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1)) for y in prefix[1:]: _, state = net(get_input(), state) outputs.append(vocab[y]) for _ in range(num_preds): y, state = net(get_input(), state) outputs.append(int(y.argmax(dim=1).reshape(1))) return ''.join([vocab.idx_to_token[i] for i in outputs]) def grad_clipping(net, theta): if isinstance(net, nn.Module): params = [p for p in net.parameters() if p.requires_grad] else: params = net.params norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params)) if norm > theta: for param in params: param.grad[:] *= theta / norm def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter): state, timer = None, d2l.Timer() metric = d2l.Accumulator(2) for X, Y in train_iter: if state is None or use_random_iter: state = net.begin_state(batch_size=X.shape[0], device=device) else: if isinstance(net, nn.Module) and not isinstance(state, tuple): state.detach_() else: for s in state: s.detach_() y = Y.T.reshape(-1) X, y = X.to(device), y.to(device) y_hat, state = net(X, state) l = loss(y_hat, y.long()).mean() if isinstance(updater, torch.optim.Optimizer): updater.zero_grad() l.backward() grad_clipping(net, 1) updater.step() else: l.backward() grad_clipping(net, 1) updater(batch_size=1) metric.add(l * y.numel(), y.numel()) return math.exp(metric[0] / metric[1]), metric[1] / timer.stop() def train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False): loss = nn.CrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs]) if isinstance(net, nn.Module): updater = torch.optim.SGD(net.parameters(), lr) else: updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size) predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device) for epoch in range(num_epochs): ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter) if (epoch + 1) % 10 == 0: animator.add(epoch + 1, [ppl])
%matplotlib inline import math from mxnet import autograd, gluon, init, np, npx from d2l import mxnet as d2l npx.set_np() batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) npx.one_hot(np.array([0, 2]), len(vocab)) X = np.arange(10).reshape((2, 5)) npx.one_hot(X.T, 28).shape def get_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return np.random.normal(scale=0.01, size=shape, ctx=device) W_xh = normal((num_inputs, num_hiddens)) W_hh = normal((num_hiddens, num_hiddens)) b_h = np.zeros(num_hiddens, ctx=device) W_hq = normal((num_hiddens, num_outputs)) b_q = np.zeros(num_outputs, ctx=device) params = [W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.attach_grad() return params def init_rnn_state(batch_size, num_hiddens, device): return (np.zeros((batch_size, num_hiddens), ctx=device), ) def rnn(inputs, state, params): W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: H = np.tanh(np.dot(X, W_xh) + np.dot(H, W_hh) + b_h) Y = np.dot(H, W_hq) + b_q outputs.append(Y) return np.concatenate(outputs, axis=0), (H,) class RNNModelScratch: def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn): self.vocab_size, self.num_hiddens = vocab_size, num_hiddens self.params = get_params(vocab_size, num_hiddens, device) self.init_state, self.forward_fn = init_state, forward_fn def __call__(self, X, state): X = npx.one_hot(X.T, self.vocab_size) return self.forward_fn(X, state, self.params) def begin_state(self, batch_size, ctx): return self.init_state(batch_size, self.num_hiddens, ctx) num_hiddens = 512 net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn) state = net.begin_state(X.shape[0], d2l.try_gpu()) Y, new_state = net(X.as_in_context(d2l.try_gpu()), state) Y.shape, len(new_state), new_state[0].shape def predict_ch8(prefix, num_preds, net, vocab, device): state = net.begin_state(batch_size=1, ctx=device) outputs = [vocab[prefix[0]]] get_input = lambda: np.array([outputs[-1]], ctx=device).reshape((1, 1)) for y in prefix[1:]: _, state = net(get_input(), state) outputs.append(vocab[y]) for _ in range(num_preds): y, state = net(get_input(), state) outputs.append(int(y.argmax(axis=1).reshape(1))) return ''.join([vocab.idx_to_token[i] for i in outputs]) def grad_clipping(net, theta): if isinstance(net, gluon.Block): params = [p.data() for p in net.collect_params().values()] else: params = net.params norm = math.sqrt(sum((p.grad ** 2).sum() for p in params)) if norm > theta: for param in params: param.grad[:] *= theta / norm def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter): state, timer = None, d2l.Timer() metric = d2l.Accumulator(2) for X, Y in train_iter: if state is None or use_random_iter: state = net.begin_state(batch_size=X.shape[0], ctx=device) else: for s in state: s.detach() y = Y.T.reshape(-1) X, y = X.as_in_ctx(device), y.as_in_ctx(device) with autograd.record(): y_hat, state = net(X, state) l = loss(y_hat, y).mean() l.backward() grad_clipping(net, 1) updater(batch_size=1) metric.add(l * d2l.size(y), d2l.size(y)) return math.exp(metric[0] / metric[1]), metric[1] / timer.stop() def train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False): loss = gluon.loss.SoftmaxCrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs]) if isinstance(net, gluon.Block): net.initialize(ctx=device, force_reinit=True, init=init.Normal(0.01)) trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr}) updater = lambda batch_size: trainer.step(batch_size) else: updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size) predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device) for epoch in range(num_epochs): ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter) if (epoch + 1) % 10 == 0: animator.add(epoch + 1, [ppl])
null
420
null
import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_layer = nn.RNN(len(vocab), num_hiddens) state = torch.zeros((1, batch_size, num_hiddens)) state.shape X = torch.rand(size=(num_steps, batch_size, len(vocab))) Y, state_new = rnn_layer(X, state) Y.shape, state_new.shape class RNNModel(nn.Module): def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.num_hiddens = self.rnn.hidden_size if not self.rnn.bidirectional: self.num_directions = 1 self.linear = nn.Linear(self.num_hiddens, self.vocab_size) else: self.num_directions = 2 self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size) def forward(self, inputs, state): X = F.one_hot(inputs.T.long(), self.vocab_size) X = X.to(torch.float32) Y, state = self.rnn(X, state) output = self.linear(Y.reshape((-1, Y.shape[-1]))) return output, state def begin_state(self, device, batch_size=1): if not isinstance(self.rnn, nn.LSTM): return torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device) else: return (torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device), torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device)) device = d2l.try_gpu() net = RNNModel(rnn_layer, vocab_size=len(vocab)) net = net.to(device) d2l.predict_ch8('time traveller', 10, net, vocab, device)
from mxnet import np, npx from mxnet.gluon import nn, rnn from d2l import mxnet as d2l npx.set_np() batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_layer = rnn.RNN(num_hiddens) rnn_layer.initialize() state = rnn_layer.begin_state(batch_size=batch_size) len(state), state[0].shape X = np.random.uniform(size=(num_steps, batch_size, len(vocab))) Y, state_new = rnn_layer(X, state) Y.shape, len(state_new), state_new[0].shape class RNNModel(nn.Block): def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.dense = nn.Dense(vocab_size) def forward(self, inputs, state): X = npx.one_hot(inputs.T, self.vocab_size) Y, state = self.rnn(X, state) output = self.dense(Y.reshape(-1, Y.shape[-1])) return output, state def begin_state(self, *args, **kwargs): return self.rnn.begin_state(*args, **kwargs) device = d2l.try_gpu() net = RNNModel(rnn_layer, len(vocab)) net.initialize(force_reinit=True, ctx=device) d2l.predict_ch8('time traveller', 10, net, vocab, device)
null
421
null
import torch from torch import nn from d2l import torch as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return torch.randn(size=shape, device=device)*0.01 def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device)) W_xz, W_hz, b_z = three() W_xr, W_hr, b_r = three() W_xh, W_hh, b_h = three() W_hq = normal((num_hiddens, num_outputs)) b_q = torch.zeros(num_outputs, device=device) params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.requires_grad_(True) return params def init_gru_state(batch_size, num_hiddens, device): return (torch.zeros((batch_size, num_hiddens), device=device), ) def gru(inputs, state, params): W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: Z = torch.sigmoid((X @ W_xz) + (H @ W_hz) + b_z) R = torch.sigmoid((X @ W_xr) + (H @ W_hr) + b_r) H_tilda = torch.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h) H = Z * H + (1 - Z) * H_tilda Y = H @ W_hq + b_q outputs.append(Y) return torch.cat(outputs, dim=0), (H,) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1 num_inputs = vocab_size gru_layer = nn.GRU(num_inputs, num_hiddens) model = d2l.RNNModel(gru_layer, len(vocab)) model = model.to(device) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
from mxnet import np, npx from mxnet.gluon import rnn from d2l import mxnet as d2l npx.set_np() batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return np.random.normal(scale=0.01, size=shape, ctx=device) def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device)) W_xz, W_hz, b_z = three() W_xr, W_hr, b_r = three() W_xh, W_hh, b_h = three() W_hq = normal((num_hiddens, num_outputs)) b_q = np.zeros(num_outputs, ctx=device) params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.attach_grad() return params def init_gru_state(batch_size, num_hiddens, device): return (np.zeros(shape=(batch_size, num_hiddens), ctx=device), ) def gru(inputs, state, params): W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: Z = npx.sigmoid(np.dot(X, W_xz) + np.dot(H, W_hz) + b_z) R = npx.sigmoid(np.dot(X, W_xr) + np.dot(H, W_hr) + b_r) H_tilda = np.tanh(np.dot(X, W_xh) + np.dot(R * H, W_hh) + b_h) H = Z * H + (1 - Z) * H_tilda Y = np.dot(H, W_hq) + b_q outputs.append(Y) return np.concatenate(outputs, axis=0), (H,) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1 gru_layer = rnn.GRU(num_hiddens) model = d2l.RNNModel(gru_layer, len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
null
422
null
import torch from torch import nn from d2l import torch as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_lstm_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return torch.randn(size=shape, device=device)*0.01 def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device)) W_xi, W_hi, b_i = three() W_xf, W_hf, b_f = three() W_xo, W_ho, b_o = three() W_xc, W_hc, b_c = three() W_hq = normal((num_hiddens, num_outputs)) b_q = torch.zeros(num_outputs, device=device) params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] for param in params: param.requires_grad_(True) return params def init_lstm_state(batch_size, num_hiddens, device): return (torch.zeros((batch_size, num_hiddens), device=device), torch.zeros((batch_size, num_hiddens), device=device)) def lstm(inputs, state, params): [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] = params (H, C) = state outputs = [] for X in inputs: I = torch.sigmoid((X @ W_xi) + (H @ W_hi) + b_i) F = torch.sigmoid((X @ W_xf) + (H @ W_hf) + b_f) O = torch.sigmoid((X @ W_xo) + (H @ W_ho) + b_o) C_tilda = torch.tanh((X @ W_xc) + (H @ W_hc) + b_c) C = F * C + I * C_tilda H = O * torch.tanh(C) Y = (H @ W_hq) + b_q outputs.append(Y) return torch.cat(outputs, dim=0), (H, C) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1 num_inputs = vocab_size lstm_layer = nn.LSTM(num_inputs, num_hiddens) model = d2l.RNNModel(lstm_layer, len(vocab)) model = model.to(device) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
from mxnet import np, npx from mxnet.gluon import rnn from d2l import mxnet as d2l npx.set_np() batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_lstm_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return np.random.normal(scale=0.01, size=shape, ctx=device) def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device)) W_xi, W_hi, b_i = three() W_xf, W_hf, b_f = three() W_xo, W_ho, b_o = three() W_xc, W_hc, b_c = three() W_hq = normal((num_hiddens, num_outputs)) b_q = np.zeros(num_outputs, ctx=device) params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] for param in params: param.attach_grad() return params def init_lstm_state(batch_size, num_hiddens, device): return (np.zeros((batch_size, num_hiddens), ctx=device), np.zeros((batch_size, num_hiddens), ctx=device)) def lstm(inputs, state, params): [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] = params (H, C) = state outputs = [] for X in inputs: I = npx.sigmoid(np.dot(X, W_xi) + np.dot(H, W_hi) + b_i) F = npx.sigmoid(np.dot(X, W_xf) + np.dot(H, W_hf) + b_f) O = npx.sigmoid(np.dot(X, W_xo) + np.dot(H, W_ho) + b_o) C_tilda = np.tanh(np.dot(X, W_xc) + np.dot(H, W_hc) + b_c) C = F * C + I * C_tilda H = O * np.tanh(C) Y = np.dot(H, W_hq) + b_q outputs.append(Y) return np.concatenate(outputs, axis=0), (H, C) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1 lstm_layer = rnn.LSTM(num_hiddens) model = d2l.RNNModel(lstm_layer, len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
null
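A minimal sketch of a single LSTM step, using the same gate equations as the scratch implementations in this row; the sizes are hypothetical and chosen only to trace the shapes once (PyTorch, assuming nothing beyond the formulas above):

import torch
batch, num_inputs, num_hiddens = 2, 5, 4   # hypothetical sizes
X = torch.randn(batch, num_inputs)
H, C = torch.zeros(batch, num_hiddens), torch.zeros(batch, num_hiddens)
def three():
    return (torch.randn(num_inputs, num_hiddens) * 0.01,
            torch.randn(num_hiddens, num_hiddens) * 0.01,
            torch.zeros(num_hiddens))
W_xi, W_hi, b_i = three()   # input gate
W_xf, W_hf, b_f = three()   # forget gate
W_xo, W_ho, b_o = three()   # output gate
W_xc, W_hc, b_c = three()   # candidate memory cell
I = torch.sigmoid(X @ W_xi + H @ W_hi + b_i)
F = torch.sigmoid(X @ W_xf + H @ W_hf + b_f)
O = torch.sigmoid(X @ W_xo + H @ W_ho + b_o)
C_tilda = torch.tanh(X @ W_xc + H @ W_hc + b_c)
C = F * C + I * C_tilda          # forget part of the old cell, add new content
H = O * torch.tanh(C)            # output gate controls what is exposed
print(H.shape, C.shape)          # both torch.Size([2, 4])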
423
null
import os import torch from d2l import torch as d2l def build_array_nmt(lines, vocab, num_steps): lines = [vocab[l] for l in lines] lines = [l + [vocab['<eos>']] for l in lines] array = torch.tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines]) valid_len = (array != vocab['<pad>']).type(torch.int32).sum(1) return array, valid_len train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8) for X, X_valid_len, Y, Y_valid_len in train_iter: print('X:', X.type(torch.int32)) print('Valid length of X:', X_valid_len) print('Y:', Y.type(torch.int32)) print('Valid length of Y:', Y_valid_len) break
import os from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() def build_array_nmt(lines, vocab, num_steps): lines = [vocab[l] for l in lines] lines = [l + [vocab['<eos>']] for l in lines] array = np.array([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines]) valid_len = (array != vocab['<pad>']).astype(np.int32).sum(1) return array, valid_len train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8) for X, X_valid_len, Y, Y_valid_len in train_iter: print('X:', X.astype(np.int32)) print('Valid length of X:', X_valid_len) print('Y:', Y.astype(np.int32)) print('Valid length of Y:', Y_valid_len) break
null
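Both columns in this row call a truncate_pad helper that is not shown here. A plausible minimal version, stated as an assumption inferred from how it is called (clip a token list to num_steps or pad it on the right with the padding token), is:

def truncate_pad(line, num_steps, padding_token):
    # Assumed behavior: truncate to num_steps, or pad on the right.
    if len(line) > num_steps:
        return line[:num_steps]
    return line + [padding_token] * (num_steps - len(line))

print(truncate_pad([47, 4], 5, 1))   # [47, 4, 1, 1, 1]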
424
null
x = torch.arange(12) X = x.reshape(3, 4) torch.zeros((2, 3, 4)) torch.ones((2, 3, 4)) torch.randn(3, 4) torch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = torch.tensor([1.0, 2, 4, 8]) y = torch.tensor([2, 2, 2, 2]) x + y, x - y, x * y, x / y, x ** y torch.exp(x) X = torch.arange(12, dtype=torch.float32).reshape((3,4)) Y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) torch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1) a = torch.arange(3).reshape((3, 1)) b = torch.arange(2).reshape((1, 2)) Z = torch.zeros_like(Y) Z[:] = X + Y A = X.numpy() B = torch.tensor(A) a = torch.tensor([3.5]) print(a, a.item(), float(a), int(a))
null
x = paddle.arange(12) X = paddle.reshape(x, (3, 4)) paddle.zeros((2, 3, 4)) paddle.ones((2, 3, 4)) paddle.randn((3, 4),'float32') paddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = paddle.to_tensor([1.0, 2, 4, 8]) y = paddle.to_tensor([2, 2, 2, 2]) x + y, x - y, x * y, x / y, x**y paddle.exp(x) X = paddle.arange(12, dtype='float32').reshape((3, 4)) Y = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) paddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1) a = paddle.reshape(paddle.arange(3), (3, 1)) b = paddle.reshape(paddle.arange(2), (1, 2)) Z = paddle.zeros_like(Y) Z = X + Y A = X.numpy() B = paddle.to_tensor(A) type(A), type(B) a = paddle.to_tensor([3.5]) a, a.item(), float(a), int(a)
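In the a + b expression above, the (3, 1) and (1, 2) tensors are broadcast to a common (3, 2) shape before the elementwise add; a quick check (PyTorch, same values as the row):

import torch
a = torch.arange(3).reshape((3, 1))   # [[0], [1], [2]]
b = torch.arange(2).reshape((1, 2))   # [[0, 1]]
print(a + b)                          # tensor([[0, 1], [1, 2], [2, 3]])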
425
null
import torch X, y = torch.tensor(inputs.values), torch.tensor(outputs.values)
null
import warnings warnings.filterwarnings(action='ignore') import paddle X, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)
426
null
import torch x = torch.tensor(3.0) y = torch.tensor(2.0) print(x + y, x * y, x / y, x**y) x = torch.arange(4) A = torch.arange(20).reshape(5, 4) A.T B = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) B == B.T X = torch.arange(24).reshape(2, 3, 4) A = torch.arange(20, dtype=torch.float32).reshape(5, 4) B = A.clone() print(A, A + B) a = 2 X = torch.arange(24).reshape(2, 3, 4) print(a + X, (a * X).shape) x = torch.arange(4, dtype=torch.float32) print(x, x.sum()) a = A.sum() A.mean() A.sum() / A.numel() A.mean(axis=0) A.sum(axis=0) / A.shape[0] sum_A = A.sum(axis=1, keepdims=True) y = torch.ones(4, dtype = torch.float32) print(torch.dot(x, y)) torch.sum(x * y) A.shape, x.shape, torch.mv(A, x) B = torch.ones(4, 3) torch.mm(A, B) u = torch.tensor([3.0, -4.0]) torch.norm(u) torch.abs(u).sum() torch.norm(torch.ones((4, 9)))
null
import warnings warnings.filterwarnings(action='ignore') import paddle x = paddle.to_tensor([3.0]) y = paddle.to_tensor([2.0]) x + y, x * y, x / y, x**y x = paddle.arange(4) A = paddle.reshape(paddle.arange(20), (5, 4)) paddle.transpose(A, perm=[1, 0]) B = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) B == paddle.transpose(B, perm=[1, 0]) X = paddle.reshape(paddle.arange(24), (2, 3, 4)) A = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4)) B = A.clone() A, A + B a = 2 X = paddle.reshape(paddle.arange(24), (2, 3, 4)) a + X, (a * X).shape x = paddle.arange(4, dtype=paddle.float32) print(x, x.sum()) A.shape, A.sum() A.mean(), A.sum() / A.numel() A.mean(axis=0), A.sum(axis=0) / A.shape[0] sum_A = paddle.sum(A, axis=1, keepdim=True) y = paddle.ones(shape=[4], dtype='float32') x, y, paddle.dot(x, y) paddle.sum(x * y) A.shape, x.shape, paddle.mv(A, x) B = paddle.ones(shape=[4, 3], dtype='float32') paddle.mm(A, B) u = paddle.to_tensor([3.0, -4.0]) paddle.norm(u) paddle.abs(u).sum() paddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))
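Summing with keepdims=True keeps the reduced axis as size 1, so the result still broadcasts against A; that is what makes the sum_A line useful for row-wise normalization. A small check (PyTorch, mirroring the code above):

import torch
A = torch.arange(20, dtype=torch.float32).reshape(5, 4)
sum_A = A.sum(axis=1, keepdims=True)   # shape (5, 1) rather than (5,)
print((A / sum_A).sum(axis=1))         # every row of A / sum_A sums to 1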
427
null
%matplotlib inline import numpy as np from matplotlib_inline import backend_inline from d2l import torch as d2l def f(x): return 3 * x ** 2 - 4 * x def numerical_lim(f, x, h): return (f(x + h) - f(x)) / h h = 0.1 for i in range(5): print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}') h *= 0.1
null
%matplotlib inline import numpy as np from matplotlib_inline import backend_inline from d2l import paddle as d2l def f(x): return 3 * x ** 2 - 4 * x def numerical_lim(f, x, h): return (f(x + h) - f(x)) / h h = 0.1 for i in range(5): print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}') h *= 0.1
428
null
import torch x = torch.arange(4.0) x.requires_grad_(True) x.grad y = 2 * torch.dot(x, x) x.grad.zero_() y = x.sum() y.backward() x.grad x.grad.zero_() y = x * x y.sum().backward() x.grad x.grad.zero_() y = x * x u = y.detach() z = u * x z.sum().backward() x.grad == u x.grad.zero_() y.sum().backward() x.grad == 2 * x def f(a): b = a * 2 while b.norm() < 1000: b = b * 2 if b.sum() > 0: c = b else: c = 100 * b return c a = torch.randn(size=(), requires_grad=True) d = f(a) d.backward()
null
import warnings warnings.filterwarnings(action='ignore') import paddle x = paddle.arange(4, dtype='float32') x = paddle.to_tensor(x, stop_gradient=False) y = 2 * paddle.dot(x, x) x.clear_gradient() y = paddle.sum(x) y.backward() x.grad x.clear_gradient() y = x * x paddle.sum(y).backward() x.grad x.clear_gradient() y = x * x u = y.detach() z = u * x paddle.sum(z).backward() x.grad == u x.clear_gradient() paddle.sum(y).backward() x.grad == 2 * x def f(a): b = a * 2 while paddle.norm(b) < 1000: b = b * 2 if paddle.sum(b) > 0: c = b else: c = 100 * b return c a = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False) d = f(a) d.backward()
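For y = 2 * dot(x, x) above, the gradient with respect to x is 4x. The backward call for that first cell is elided in both columns; a quick check of the claim (PyTorch):

import torch
x = torch.arange(4.0, requires_grad=True)
y = 2 * torch.dot(x, x)
y.backward()
print(x.grad)            # tensor([ 0.,  4.,  8., 12.])
print(x.grad == 4 * x)   # tensor([True, True, True, True])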
429
null
%matplotlib inline import torch from torch.distributions import multinomial from d2l import torch as d2l fair_probs = torch.ones([6]) / 6 multinomial.Multinomial(1, fair_probs).sample() multinomial.Multinomial(10, fair_probs).sample() counts = multinomial.Multinomial(1000, fair_probs).sample()
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import random import numpy as np import paddle fair_probs = [1.0 / 6] * 6 paddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample() counts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample() counts / 1000 counts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample() counts / 1000
430
null
counts = multinomial.Multinomial(10, fair_probs).sample((500,)) cum_counts = counts.cumsum(dim=0) estimates = cum_counts / cum_counts.sum(dim=1, keepdims=True) d2l.set_figsize((6, 4.5)) for i in range(6): d2l.plt.plot(estimates[:, i].numpy(), label=("P(die=" + str(i + 1) + ")")) d2l.plt.axhline(y=0.167, color='black', linestyle='dashed') d2l.plt.gca().set_xlabel('Groups of experiments') d2l.plt.gca().set_ylabel('Estimated probability') d2l.plt.legend(); import torch a = dir(torch.distributions) help(torch.ones) torch.ones(4)
null
counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1)) cum_counts = counts.cumsum(axis=0) cum_counts = cum_counts.squeeze(axis=1) estimates = cum_counts / cum_counts.sum(axis=1, keepdim=True) d2l.set_figsize((6, 4.5)) for i in range(6): d2l.plt.plot(estimates[:, i], label=("P(die=" + str(i + 1) + ")")) d2l.plt.axhline(y=0.167, color='black', linestyle='dashed') d2l.plt.gca().set_xlabel('Groups of experiments') d2l.plt.gca().set_ylabel('Estimated probability') d2l.plt.legend() import warnings warnings.filterwarnings(action='ignore') import paddle help(paddle.ones) paddle.ones([4], dtype='float32')
431
null
%matplotlib inline import math import time import numpy as np import torch from d2l import torch as d2l n = 10000 a = torch.ones(n) b = torch.ones(n) c = torch.zeros(n) timer = Timer() for i in range(n): c[i] = a[i] + b[i] x = np.arange(-7, 7, 0.01) params = [(0, 1), (0, 2), (3, 1)] d2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import math import time import numpy as np import paddle n = 10000 a = paddle.ones([n]) b = paddle.ones([n]) c = paddle.zeros([n]) timer = Timer() for i in range(n): c[i] = a[i] + b[i] x = np.arange(-7, 7, 0.01) params = [(0, 1), (0, 2), (3, 1)] d2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])
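This row times the scalar-at-a-time loop; the comparison point, the single vectorized add, is elided along with the Timer class. A minimal sketch of that comparison using time.time directly (an assumption standing in for the missing Timer):

import time
import torch
n = 10000
a, b = torch.ones(n), torch.ones(n)
start = time.time()
c = torch.zeros(n)
for i in range(n):
    c[i] = a[i] + b[i]            # one scalar addition per iteration
loop_t = time.time() - start
start = time.time()
d = a + b                          # one vectorized call
vec_t = time.time() - start
print(f'loop: {loop_t:.5f} sec, vectorized: {vec_t:.5f} sec')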
432
null
%matplotlib inline import random import torch from d2l import torch as d2l def synthetic_data(w, b, num_examples): X = torch.normal(0, 1, (num_examples, len(w))) y = torch.matmul(X, w) + b y += torch.normal(0, 0.01, y.shape) return X, y.reshape((-1, 1)) true_w = torch.tensor([2, -3.4]) true_b = 4.2 features, labels = synthetic_data(true_w, true_b, 1000) d2l.set_figsize() d2l.plt.scatter(features[:, (1)].detach().numpy(), labels.detach().numpy(), 1); def data_iter(batch_size, features, labels): num_examples = len(features) indices = list(range(num_examples)) random.shuffle(indices) for i in range(0, num_examples, batch_size): batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)]) yield features[batch_indices], labels[batch_indices] batch_size = 10 for X, y in data_iter(batch_size, features, labels): print(X, ' ', y) break w = torch.normal(0, 0.01, size=(2,1), requires_grad=True) b = torch.zeros(1, requires_grad=True) def linreg(X, w, b): return torch.matmul(X, w) + b def sgd(params, lr, batch_size): with torch.no_grad(): for param in params: param -= lr * param.grad / batch_size param.grad.zero_() lr = 0.03 num_epochs = 3 net = linreg loss = squared_loss for epoch in range(num_epochs): for X, y in data_iter(batch_size, features, labels): l = loss(net(X, w, b), y) l.sum().backward() sgd([w, b], lr, batch_size) with torch.no_grad(): train_l = loss(net(features, w, b), labels)
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import random import paddle def synthetic_data(w, b, num_examples): X = paddle.normal(0, 1, (num_examples, len(w))) y = paddle.matmul(X, w) + b y += paddle.normal(0, 0.01, y.shape) return X, y.reshape((-1, 1)) true_w = paddle.to_tensor([2, -3.4]) true_b = 4.2 features, labels = synthetic_data(true_w, true_b, 1000) d2l.set_figsize() d2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1); def data_iter(batch_size, features, labels): num_examples = len(features) indices = list(range(num_examples)) random.shuffle(indices) for i in range(0, num_examples, batch_size): batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)]) yield features[batch_indices], labels[batch_indices] batch_size = 10 for X, y in data_iter(batch_size, features, labels): break w = paddle.normal(0, 0.01, shape=(2,1)) b = paddle.zeros(shape=[1]) w.stop_gradient = False b.stop_gradient = False def linreg(X, w, b): return paddle.matmul(X, w) + b def sgd(params, lr, batch_size): with paddle.no_grad(): for i, param in enumerate(params): param -= lr * params[i].grad / batch_size params[i].set_value(param) params[i].clear_gradient() lr = 0.03 num_epochs = 3 net = linreg loss = squared_loss for epoch in range(num_epochs): for X, y in data_iter(batch_size, features, labels): l = loss(net(X, w, b), y) l.sum().backward() sgd([w, b], lr, batch_size) with paddle.no_grad(): train_l = loss(net(features, w, b), labels)
433
null
import numpy as np import torch from torch.utils import data from d2l import torch as d2l true_w = torch.tensor([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) def load_array(data_arrays, batch_size, is_train=True): dataset = data.TensorDataset(*data_arrays) return data.DataLoader(dataset, batch_size, shuffle=is_train) batch_size = 10 data_iter = load_array((features, labels), batch_size) from torch import nn net = nn.Sequential(nn.Linear(2, 1)) net[0].weight.data.normal_(0, 0.01) net[0].bias.data.fill_(0) trainer = torch.optim.SGD(net.parameters(), lr=0.03) w = net[0].weight.data b = net[0].bias.data
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import numpy as np import paddle true_w = paddle.to_tensor([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) def load_array(data_arrays, batch_size, is_train=True): dataset = paddle.io.TensorDataset(data_arrays) return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True) batch_size = 10 data_iter = load_array((features, labels), batch_size) from paddle import nn net = nn.Sequential(nn.Linear(2, 1)) weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01)) bias_attr = paddle.ParamAttr(initializer=None) net = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr)) trainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters()) w = net[0].weight b = net[0].bias
434
null
%matplotlib inline import torch import torchvision from torch.utils import data from torchvision import transforms from d2l import torch as d2l d2l.use_svg_display() trans = transforms.ToTensor() mnist_train = torchvision.datasets.FashionMNIST( root="../data", train=True, transform=trans, download=True) mnist_test = torchvision.datasets.FashionMNIST( root="../data", train=False, transform=trans, download=True) def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): figsize = (num_cols * scale, num_rows * scale) _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize) axes = axes.flatten() for i, (ax, img) in enumerate(zip(axes, imgs)): if torch.is_tensor(img): ax.imshow(img.numpy()) else: ax.imshow(img) ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) if titles: ax.set_title(titles[i]) return axes X, y = next(iter(data.DataLoader(mnist_train, batch_size=18))) show_images(X.reshape(18, 28, 28), 2, 9, titles=get_fashion_mnist_labels(y)); batch_size = 256 def get_dataloader_workers(): return 4 train_iter = data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()) def load_data_fashion_mnist(batch_size, resize=None): trans = [transforms.ToTensor()] if resize: trans.insert(0, transforms.Resize(resize)) trans = transforms.Compose(trans) mnist_train = torchvision.datasets.FashionMNIST(root="../data", train=True, transform=trans, download=True) mnist_test = torchvision.datasets.FashionMNIST(root="../data", train=False, transform=trans, download=True) return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()), data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import sys import paddle from paddle.vision import transforms d2l.use_svg_display() trans = transforms.ToTensor() mnist_train = paddle.vision.datasets.FashionMNIST(mode="train", transform=trans) mnist_test = paddle.vision.datasets.FashionMNIST(mode="test", transform=trans) def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): figsize = (num_cols * scale, num_rows * scale) _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize) axes = axes.flatten() for i, (ax, img) in enumerate(zip(axes, imgs)): if paddle.is_tensor(img): ax.imshow(img.numpy()) else: ax.imshow(img) ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) if titles: ax.set_title(titles[i]) return axes X, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18))) show_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y)); batch_size = 256 def get_dataloader_workers(): return 4 train_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()) def load_data_fashion_mnist(batch_size, resize=None): trans = [transforms.ToTensor()] if resize: trans.insert(0, transforms.Resize(resize)) trans = transforms.Compose(trans) mnist_train = paddle.vision.datasets.FashionMNIST(mode="train", transform=trans) mnist_test = paddle.vision.datasets.FashionMNIST(mode="test", transform=trans) return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()), paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers()))
435
null
import torch from IPython import display from d2l import torch as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True) b = torch.zeros(num_outputs, requires_grad=True) X = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) X.sum(0, keepdim=True), X.sum(1, keepdim=True) def softmax(X): X_exp = torch.exp(X) partition = X_exp.sum(1, keepdim=True) return X_exp / partition X = torch.normal(0, 1, (2, 5)) X_prob = softmax(X) X_prob, X_prob.sum(1) def net(X): return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b) y = torch.tensor([0, 2]) y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]]) y_hat[[0, 1], y] def cross_entropy(y_hat, y): return - torch.log(y_hat[range(len(y_hat)), y]) cross_entropy(y_hat, y) def accuracy(y_hat, y): if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: y_hat = y_hat.argmax(axis=1) cmp = y_hat.type(y.dtype) == y return float(cmp.type(y.dtype).sum()) def evaluate_accuracy(net, data_iter): if isinstance(net, torch.nn.Module): net.eval() metric = Accumulator(2) with torch.no_grad(): for X, y in data_iter: metric.add(accuracy(net(X), y), y.numel()) return metric[0] / metric[1] def train_epoch_ch3(net, train_iter, loss, updater): if isinstance(net, torch.nn.Module): net.train() metric = Accumulator(3) for X, y in train_iter: y_hat = net(X) l = loss(y_hat, y) if isinstance(updater, torch.optim.Optimizer): updater.zero_grad() l.mean().backward() updater.step() else: l.sum().backward() updater(X.shape[0]) metric.add(float(l.sum()), accuracy(y_hat, y), y.numel()) return metric[0] / metric[2], metric[1] / metric[2]
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from IPython import display batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs)) b = paddle.zeros(shape=(num_outputs,)) W.stop_gradient=False b.stop_gradient=False X = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) X.sum(0, keepdim=True), X.sum(1, keepdim=True) def softmax(X): X_exp = paddle.exp(X) partition = X_exp.sum(1, keepdim=True) return X_exp / partition X = paddle.normal(0, 1, (2, 5)) X_prob = softmax(X) X_prob, X_prob.sum(1) def net(X): return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b) y = paddle.to_tensor([0, 2]) y_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]]) y_hat[[0, 1], y] def cross_entropy(y_hat, y): return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()]) cross_entropy(y_hat, y) def accuracy(y_hat, y): if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: y_hat = y_hat.argmax(axis=1) if len(y_hat.shape) < len(y.shape): cmp = y_hat.astype(y.dtype) == y.squeeze() else: cmp = y_hat.astype(y.dtype) == y return float(cmp.astype(y.dtype).sum()) def evaluate_accuracy(net, data_iter): if isinstance(net, paddle.nn.Layer): net.eval() metric = Accumulator(2) with paddle.no_grad(): for X, y in data_iter: metric.add(accuracy(net(X), y), y.numel()) return metric[0] / metric[1] def train_epoch_ch3(net, train_iter, loss, updater): if isinstance(net, paddle.nn.Layer): net.train() metric = Accumulator(3) for X, y in train_iter: y_hat = net(X) l = loss(y_hat, y) if isinstance(updater, paddle.optimizer.Optimizer): updater.clear_grad() l.mean().backward() updater.step() else: l.sum().backward() updater(X.shape[0]) metric.add(float(l.sum()), accuracy(y_hat, y), y.numel()) return metric[0] / metric[2], metric[1] / metric[2]
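The cross_entropy defined above simply picks out the predicted probability of the true class and takes its negative log; for the toy y_hat and y in this row that is -log 0.1 and -log 0.5:

import math
# y = [0, 2]; row 0 assigns 0.1 to class 0, row 1 assigns 0.5 to class 2.
print([-math.log(0.1), -math.log(0.5)])   # [2.3026, 0.6931] (approximately)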
436
null
import torch from torch import nn from d2l import torch as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10)) def init_weights(m): if type(m) == nn.Linear: nn.init.normal_(m.weight, std=0.01) net.apply(init_weights); trainer = torch.optim.SGD(net.parameters(), lr=0.1)
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10)) def init_weights(m): if type(m) == nn.Linear: nn.initializer.Normal(m.weight, std=0.01) net.apply(init_weights); trainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())
437
null
%matplotlib inline import torch from d2l import torch as d2l x = torch.arange(-8.0, 8.0, 0.1, requires_grad=True) y = torch.relu(x) d2l.plot(x.detach(), y.detach(), 'x', 'relu(x)', figsize=(5, 2.5)) y.backward(torch.ones_like(x), retain_graph=True) d2l.plot(x.detach(), x.grad, 'x', 'grad of relu', figsize=(5, 2.5)) y = torch.sigmoid(x) d2l.plot(x.detach(), y.detach(), 'x', 'sigmoid(x)', figsize=(5, 2.5)) x.grad.data.zero_() y.backward(torch.ones_like(x),retain_graph=True) d2l.plot(x.detach(), x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5)) y = torch.tanh(x) d2l.plot(x.detach(), y.detach(), 'x', 'tanh(x)', figsize=(5, 2.5)) x.grad.data.zero_() y.backward(torch.ones_like(x),retain_graph=True) d2l.plot(x.detach(), x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle x = paddle.arange(-8.0, 8.0, 0.1, dtype='float32') x.stop_gradient = False y = paddle.nn.functional.relu(x) d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5)) y.backward(paddle.ones_like(x), retain_graph=True) d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5)) y = paddle.nn.functional.sigmoid(x) d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5)) x.clear_gradient() y.backward(paddle.ones_like(x), retain_graph=True) d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5)) y = paddle.tanh(x) d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5)) x.clear_gradient() y.backward(paddle.ones_like(x), retain_graph=True) d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))
438
null
import torch from torch import nn from d2l import torch as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs, num_outputs, num_hiddens = 784, 10, 256 W1 = nn.Parameter(torch.randn( num_inputs, num_hiddens, requires_grad=True) * 0.01) b1 = nn.Parameter(torch.zeros(num_hiddens, requires_grad=True)) W2 = nn.Parameter(torch.randn( num_hiddens, num_outputs, requires_grad=True) * 0.01) b2 = nn.Parameter(torch.zeros(num_outputs, requires_grad=True)) params = [W1, b1, W2, b2] def relu(X): a = torch.zeros_like(X) return torch.max(X, a) num_epochs, lr = 10, 0.1 updater = torch.optim.SGD(params, lr=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs, num_outputs, num_hiddens = 784, 10, 256 W1 = paddle.randn([num_inputs, num_hiddens]) * 0.01 W1.stop_gradient = False b1 = paddle.zeros([num_hiddens]) b1.stop_gradient = False W2 = paddle.randn([num_hiddens, num_outputs]) * 0.01 W2.stop_gradient = False b2 = paddle.zeros([num_outputs]) b2.stop_gradient = False params = [W1, b1, W2, b2] def relu(X): a = paddle.zeros_like(X) return paddle.maximum(X, a) num_epochs, lr = 10, 0.1 updater = paddle.optimizer.SGD(learning_rate=lr, parameters=params) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
439
null
import torch from torch import nn from d2l import torch as d2l net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10)) def init_weights(m): if type(m) == nn.Linear: nn.init.normal_(m.weight, std=0.01) net.apply(init_weights); batch_size, lr, num_epochs = 256, 0.1, 10 loss = nn.CrossEntropyLoss(reduction='none') trainer = torch.optim.SGD(net.parameters(), lr=lr) train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10)) for layer in net: if type(layer) == nn.Linear: weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.01)) layer.weight_attr = weight_attr batch_size, lr, num_epochs = 256, 0.1, 10 loss = nn.CrossEntropyLoss(reduction='none') trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr) train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
440
null
import math import numpy as np import torch from torch import nn from d2l import torch as d2l true_w, features, poly_features, labels = [torch.tensor(x, dtype=torch.float32) for x in [true_w, features, poly_features, labels]] features[:2], poly_features[:2, :], labels[:2] def train(train_features, test_features, train_labels, test_labels, num_epochs=400): loss = nn.MSELoss(reduction='none') input_shape = train_features.shape[-1] net = nn.Sequential(nn.Linear(input_shape, 1, bias=False)) batch_size = min(10, train_labels.shape[0]) train_iter = d2l.load_array((train_features, train_labels.reshape(-1,1)), batch_size) test_iter = d2l.load_array((test_features, test_labels.reshape(-1,1)), batch_size, is_train=False) trainer = torch.optim.SGD(net.parameters(), lr=0.01) animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test']) for epoch in range(num_epochs): d2l.train_epoch_ch3(net, train_iter, loss, trainer) if epoch == 0 or (epoch + 1) % 20 == 0: animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss))) train(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:]) train(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500)
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import math import numpy as np import paddle from paddle import nn true_w, features, poly_features, labels = [paddle.to_tensor(x, dtype= paddle.float32) for x in [true_w, features, poly_features, labels]] features[:2], poly_features[:2, :], labels[:2] def train(train_features, test_features, train_labels, test_labels, num_epochs=400): loss = nn.MSELoss() input_shape = train_features.shape[-1] net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False)) batch_size = min(10, train_labels.shape[0]) train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size) test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False) trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01) animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test']) for epoch in range(num_epochs): d2l.train_epoch_ch3(net, train_iter, loss, trainer) if epoch == 0 or (epoch + 1) % 20 == 0: animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss))) train(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:]) train(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500)
441
null
%matplotlib inline import torch from torch import nn from d2l import torch as d2l n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5 true_w, true_b = torch.ones((num_inputs, 1)) * 0.01, 0.05 train_data = d2l.synthetic_data(true_w, true_b, n_train) train_iter = d2l.load_array(train_data, batch_size) test_data = d2l.synthetic_data(true_w, true_b, n_test) test_iter = d2l.load_array(test_data, batch_size, is_train=False) def init_params(): w = torch.normal(0, 1, size=(num_inputs, 1), requires_grad=True) b = torch.zeros(1, requires_grad=True) return [w, b] def l2_penalty(w): return torch.sum(w.pow(2)) / 2 def train(lambd): w, b = init_params() net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss num_epochs, lr = 100, 0.003 animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: l = loss(net(X), y) + lambd * l2_penalty(w) l.sum().backward() d2l.sgd([w, b], lr, batch_size) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) def train_concise(wd): net = nn.Sequential(nn.Linear(num_inputs, 1)) for param in net.parameters(): param.data.normal_() loss = nn.MSELoss(reduction='none') num_epochs, lr = 100, 0.003 trainer = torch.optim.SGD([{"params":net[0].weight,'weight_decay': wd}, {"params":net[0].bias}], lr=lr) animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: trainer.zero_grad() l = loss(net(X), y) l.mean().backward() trainer.step() if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5 true_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05 train_data = d2l.synthetic_data(true_w, true_b, n_train) train_iter = d2l.load_array(train_data, batch_size) test_data = d2l.synthetic_data(true_w, true_b, n_test) test_iter = d2l.load_array(test_data, batch_size, is_train=False) def init_params(): w = paddle.normal(0, 1, shape=(num_inputs, 1)) w.stop_gradient = False b = paddle.zeros(shape=[1]) b.stop_gradient = False return [w, b] def l2_penalty(w): return paddle.sum(w.pow(2)) / 2 def train(lambd): w, b = init_params() net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss num_epochs, lr = 100, 0.003 animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: l = loss(net(X), y) + lambd * l2_penalty(w) l.sum().backward() d2l.sgd([w, b], lr, batch_size) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) def train_concise(wd): weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0)) bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0)) net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr)) loss = nn.MSELoss() num_epochs, lr = 100, 0.003 trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0) animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: l = loss(net(X), y) l.backward() trainer.step() trainer.clear_grad() if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))
442
null
import torch from torch import nn from d2l import torch as d2l def dropout_layer(X, dropout): assert 0 <= dropout <= 1 if dropout == 1: return torch.zeros_like(X) if dropout == 0: return X mask = (torch.rand(X.shape) > dropout).float() return mask * X / (1.0 - dropout) X= torch.arange(16, dtype = torch.float32).reshape((2, 8)) dropout1, dropout2 = 0.2, 0.5 class Net(nn.Module): def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2, is_training = True): super(Net, self).__init__() self.num_inputs = num_inputs self.training = is_training self.lin1 = nn.Linear(num_inputs, num_hiddens1) self.lin2 = nn.Linear(num_hiddens1, num_hiddens2) self.lin3 = nn.Linear(num_hiddens2, num_outputs) self.relu = nn.ReLU() def forward(self, X): H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs)))) if self.training == True: H1 = dropout_layer(H1, dropout1) H2 = self.relu(self.lin2(H1)) if self.training == True: H2 = dropout_layer(H2, dropout2) out = self.lin3(H2) return out net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2) num_epochs, lr, batch_size = 10, 0.5, 256 loss = nn.CrossEntropyLoss(reduction='none') train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) trainer = torch.optim.SGD(net.parameters(), lr=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Dropout(dropout1), nn.Linear(256, 256), nn.ReLU(), nn.Dropout(dropout2), nn.Linear(256, 10)) def init_weights(m): if type(m) == nn.Linear: nn.init.normal_(m.weight, std=0.01) net.apply(init_weights); trainer = torch.optim.SGD(net.parameters(), lr=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
null
import warnings warnings.filterwarnings(action='ignore') import random import paddle from paddle import nn warnings.filterwarnings("ignore", category=DeprecationWarning) from d2l import paddle as d2l def dropout_layer(X, dropout): assert 0 <= dropout <= 1 if dropout == 1: return paddle.zeros_like(X) if dropout == 0: return X mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32') return mask * X / (1.0 - dropout) X= paddle.arange(16, dtype = paddle.float32).reshape((2, 8)) dropout1, dropout2 = 0.2, 0.5 class Net(nn.Layer): def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2, is_training = True): super(Net, self).__init__() self.num_inputs = num_inputs self.training = is_training self.lin1 = nn.Linear(num_inputs, num_hiddens1) self.lin2 = nn.Linear(num_hiddens1, num_hiddens2) self.lin3 = nn.Linear(num_hiddens2, num_outputs) self.relu = nn.ReLU() def forward(self, X): H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs)))) if self.training == True: H1 = dropout_layer(H1, dropout1) H2 = self.relu(self.lin2(H1)) if self.training == True: H2 = dropout_layer(H2, dropout2) out = self.lin3(H2) return out net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2) num_epochs, lr, batch_size = 10, 0.5, 256 loss = nn.CrossEntropyLoss(reduction='none') train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) trainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters()) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01)) net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256, weight_attr=weight_attr), nn.ReLU(), nn.Dropout(dropout1), nn.Linear(256, 256, weight_attr=weight_attr), nn.ReLU(), nn.Dropout(dropout2), nn.Linear(256, 10, weight_attr=weight_attr)) trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters()) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
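A quick check of dropout_layer's three regimes, restating the scratch definition above so the snippet runs on its own (the p = 0.5 output is random by design):

import torch

def dropout_layer(X, dropout):
    assert 0 <= dropout <= 1
    if dropout == 1:
        return torch.zeros_like(X)     # drop everything
    if dropout == 0:
        return X                       # keep everything
    mask = (torch.rand(X.shape) > dropout).float()
    return mask * X / (1.0 - dropout)  # rescale survivors to keep the expectation

X = torch.arange(16, dtype=torch.float32).reshape((2, 8))
print(dropout_layer(X, 0.))    # unchanged
print(dropout_layer(X, 0.5))   # roughly half the entries zeroed, the rest doubled
print(dropout_layer(X, 1.))    # all zeros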
443
null
trainer = torch.optim.SGD(net.parameters(), lr=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) %matplotlib inline import torch from d2l import torch as d2l x = torch.arange(-8.0, 8.0, 0.1, requires_grad=True) y = torch.sigmoid(x) y.backward(torch.ones_like(x)) d2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5)) M = torch.normal(0, 1, size=(4,4)) for i in range(100): M = torch.mm(M,torch.normal(0, 1, size=(4, 4)))
null
trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters()) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) %matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle x = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32') x.stop_gradient = False y = paddle.nn.functional.sigmoid(x) y.backward(paddle.ones_like(x)) d2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5)) M = paddle.normal(0, 1, shape=(4,4)) for i in range(100): M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4)))
444
null
%matplotlib inline import numpy as np import pandas as pd import torch from torch import nn from d2l import torch as d2l n_train = train_data.shape[0] train_features = torch.tensor(all_features[:n_train].values, dtype=torch.float32) test_features = torch.tensor(all_features[n_train:].values, dtype=torch.float32) train_labels = torch.tensor(train_data.SalePrice.values.reshape(-1, 1), dtype=torch.float32) def log_rmse(net, features, labels): clipped_preds = torch.clamp(net(features), 1, float('inf')) rmse = torch.sqrt(loss(torch.log(clipped_preds), torch.log(labels))) return rmse.item() def train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size): train_ls, test_ls = [], [] train_iter = d2l.load_array((train_features, train_labels), batch_size) optimizer = torch.optim.Adam(net.parameters(), lr = learning_rate, weight_decay = weight_decay) for epoch in range(num_epochs): for X, y in train_iter: optimizer.zero_grad() l = loss(net(X), y) l.backward() optimizer.step() train_ls.append(log_rmse(net, train_features, train_labels)) if test_labels is not None: test_ls.append(log_rmse(net, test_features, test_labels)) return train_ls, test_ls def get_k_fold_data(k, i, X, y): assert k > 1 fold_size = X.shape[0] // k X_train, y_train = None, None for j in range(k): idx = slice(j * fold_size, (j + 1) * fold_size) X_part, y_part = X[idx, :], y[idx] if j == i: X_valid, y_valid = X_part, y_part elif X_train is None: X_train, y_train = X_part, y_part else: X_train = torch.cat([X_train, X_part], 0) y_train = torch.cat([y_train, y_part], 0) return X_train, y_train, X_valid, y_valid
null
%matplotlib inline import warnings import numpy as np import pandas as pd warnings.filterwarnings(action='ignore') import paddle from paddle import nn warnings.filterwarnings("ignore", category=DeprecationWarning) from d2l import paddle as d2l n_train = train_data.shape[0] train_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32) test_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32) train_labels = paddle.to_tensor( train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32) def log_rmse(net, features, labels): clipped_preds = paddle.clip(net(features), 1, float('inf')) rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels))) return rmse.item() def train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size): train_ls, test_ls = [], [] train_iter = d2l.load_array((train_features, train_labels), batch_size) optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0) for epoch in range(num_epochs): for X, y in train_iter: l = loss(net(X), y) l.backward() optimizer.step() optimizer.clear_grad() train_ls.append(log_rmse(net, train_features, train_labels)) if test_labels is not None: test_ls.append(log_rmse(net, test_features, test_labels)) return train_ls, test_ls def get_k_fold_data(k, i, X, y): assert k > 1 fold_size = X.shape[0] // k X_train, y_train = None, None for j in range(k): idx = slice(j * fold_size, (j + 1) * fold_size) X_part, y_part = X[idx, :], y[idx] if j == i: X_valid, y_valid = X_part, y_part elif X_train is None: X_train, y_train = X_part, y_part else: X_train = paddle.concat([X_train, X_part], 0) y_train = paddle.concat([y_train, y_part], 0) return X_train, y_train, X_valid, y_valid
445
null
import torch from torch import nn from torch.nn import functional as F net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10)) X = torch.rand(2, 20) net(X) class MLP(nn.Module): def __init__(self): super().__init__() self.hidden = nn.Linear(20, 256) self.out = nn.Linear(256, 10) def forward(self, X): return self.out(F.relu(self.hidden(X))) class MySequential(nn.Module): def __init__(self, *args): super().__init__() for idx, module in enumerate(args): self._modules[str(idx)] = module def forward(self, X): for block in self._modules.values(): X = block(X) return X class FixedHiddenMLP(nn.Module): def __init__(self): super().__init__() self.rand_weight = torch.rand((20, 20), requires_grad=False) self.linear = nn.Linear(20, 20) def forward(self, X): X = self.linear(X) X = F.relu(torch.mm(X, self.rand_weight) + 1) X = self.linear(X) while X.abs().sum() > 1: X /= 2 return X.sum() class NestMLP(nn.Module): def __init__(self): super().__init__() self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU()) self.linear = nn.Linear(32, 16) def forward(self, X): return self.linear(self.net(X)) chimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP()) chimera(X)
null
import warnings warnings.filterwarnings(action='ignore') import paddle from paddle import nn from paddle.nn import functional as F net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10)) X = paddle.rand([2, 20]) net(X) class MLP(nn.Layer): def __init__(self): super().__init__() self.hidden = nn.Linear(20, 256) self.out = nn.Linear(256, 10) def forward(self, X): return self.out(F.relu(self.hidden(X))) class MySequential(nn.Layer): def __init__(self, *layers): super(MySequential, self).__init__() if len(layers) > 0 and isinstance(layers[0], tuple): for name, layer in layers: self.add_sublayer(name, layer) else: for idx, layer in enumerate(layers): self.add_sublayer(str(idx), layer) def forward(self, X): for layer in self._sub_layers.values(): X = layer(X) return X class FixedHiddenMLP(nn.Layer): def __init__(self): super().__init__() self.rand_weight = paddle.rand([20, 20]) self.linear = nn.Linear(20, 20) def forward(self, X): X = self.linear(X) X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1) X = self.linear(X) while X.abs().sum() > 1: X /= 2 return X.sum() class NestMLP(nn.Layer): def __init__(self): super().__init__() self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU()) self.linear = nn.Linear(32, 16) def forward(self, X): return self.linear(self.net(X)) chimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP()) chimera(X)
446
null
import torch from torch import nn net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1)) X = torch.rand(size=(2, 4)) net(X) net.state_dict()['2.bias'].data def block1(): return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU()) def block2(): net = nn.Sequential() for i in range(4): net.add_module(f'block {i}', block1()) return net rgnet = nn.Sequential(block2(), nn.Linear(4, 1)) rgnet(X) def init_normal(m): if type(m) == nn.Linear: nn.init.normal_(m.weight, mean=0, std=0.01) nn.init.zeros_(m.bias) net.apply(init_normal) net[0].weight.data[0], net[0].bias.data[0] def init_constant(m): if type(m) == nn.Linear: nn.init.constant_(m.weight, 1) nn.init.zeros_(m.bias) net.apply(init_constant) net[0].weight.data[0], net[0].bias.data[0] def init_xavier(m): if type(m) == nn.Linear: nn.init.xavier_uniform_(m.weight) def init_42(m): if type(m) == nn.Linear: nn.init.constant_(m.weight, 42) net[0].apply(init_xavier) net[2].apply(init_42) def my_init(m): if type(m) == nn.Linear: nn.init.uniform_(m.weight, -10, 10) m.weight.data *= m.weight.data.abs() >= 5 net.apply(my_init) net[0].weight[:2] net[0].weight.data[:] += 1 net[0].weight.data[0, 0] = 42 net[0].weight.data[0] layer = CenteredLayer() layer(torch.FloatTensor([1, 2, 3, 4, 5]))
null
import warnings warnings.filterwarnings(action='ignore') import paddle from paddle import nn net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1)) X = paddle.rand([2, 4]) net(X) net.state_dict()['2.bias'] def block1(): return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU()) def block2(): net = nn.Sequential() for i in range(4): net.add_sublayer(f'block {i}', block1()) return net rgnet = nn.Sequential(block2(), nn.Linear(4, 1)) rgnet(X) def init_normal(m): if type(m) == nn.Linear: paddle.nn.initializer.Normal(mean=0.0, std=0.01) paddle.zeros(m.bias) net.apply(init_normal) net[0].weight[0],net[0].state_dict()['bias'] def init_constant(m): if type(m) == nn.Linear: paddle.nn.initializer.Constant(value = 1) paddle.zeros(m.bias) net.apply(init_constant) net[0].weight[0],net[0].state_dict()['bias'] def xavier(m): if type(m) == nn.Linear: paddle.nn.initializer.XavierUniform(m.weight) def init_42(m): if type(m) == nn.Linear: paddle.nn.initializer.Constant(42) net[0].apply(xavier) net[2].apply(init_42) def my_init(m): if type(m) == nn.Linear: print("Init", *[(name, param.shape) for name, param in m.named_parameters()][0]) paddle.nn.initializer.XavierUniform(m.weight, -10, 10) h = paddle.abs(m.weight) >= 5 h = paddle.to_tensor(h) m = paddle.to_tensor(m.weight) m *= h net.apply(my_init) net[0].weight[:2] net[0].weight.set_value(net[0].weight.numpy() + 1) val = net[0].weight.numpy() val[0, 0] = 42 net[0].weight.set_value(val) net[0].weight[0] layer = CenteredLayer() layer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))
447
null
import torch import torch.nn.functional as F from torch import nn class CenteredLayer(nn.Module): def __init__(self): super().__init__() def forward(self, X): return X - X.mean() Y = net(torch.rand(4, 8)) Y.mean() class MyLinear(nn.Module): def __init__(self, in_units, units): super().__init__() self.weight = nn.Parameter(torch.randn(in_units, units)) self.bias = nn.Parameter(torch.randn(units,)) def forward(self, X): linear = torch.matmul(X, self.weight.data) + self.bias.data return F.relu(linear) linear(torch.rand(2, 5)) net = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1)) net(torch.rand(2, 64))
null
import warnings warnings.filterwarnings(action='ignore') import paddle import paddle.nn.functional as F from paddle import nn class CenteredLayer(nn.Layer): def __init__(self): super().__init__() def forward(self, X): return X - X.mean() Y = net(paddle.rand([4, 8])) Y.mean() class MyLinear(nn.Layer): def __init__(self, in_units, units): super().__init__() self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32') self.bias = paddle.create_parameter(shape=(units,), dtype='float32') def forward(self, X): linear = paddle.matmul(X, self.weight) + self.bias return F.relu(linear) linear(paddle.randn([2, 5])) net = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1)) net(paddle.rand([2, 64]))
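A small self-contained check that the custom centering layer really drives the mean of its output to (numerically) zero when composed with a Linear layer; the surrounding net definition is elided in this row, so the sizes here are assumptions:

import torch
from torch import nn

class CenteredLayer(nn.Module):
    def forward(self, X):
        return X - X.mean()

net = nn.Sequential(nn.Linear(8, 128), CenteredLayer())   # assumed sizes
Y = net(torch.rand(4, 8))
print(Y.mean())   # ~0 up to floating-point error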
448
null
import torch from torch import nn from torch.nn import functional as F x = torch.arange(4) torch.save(x, 'x-file') x2 = torch.load('x-file') y = torch.zeros(4) torch.save([x, y],'x-files') x2, y2 = torch.load('x-files') mydict = {'x': x, 'y': y} torch.save(mydict, 'mydict') mydict2 = torch.load('mydict') class MLP(nn.Module): def __init__(self): super().__init__() self.hidden = nn.Linear(20, 256) self.output = nn.Linear(256, 10) def forward(self, x): return self.output(F.relu(self.hidden(x))) net = MLP() X = torch.randn(size=(2, 20)) Y = net(X) torch.save(net.state_dict(), 'mlp.params') clone = MLP() clone.load_state_dict(torch.load('mlp.params')) clone.eval()
null
import warnings warnings.filterwarnings(action='ignore') import paddle from paddle import nn from paddle.nn import functional as F x = paddle.arange(4) paddle.save(x, 'x-file') x2 = paddle.load('x-file') y = paddle.zeros([4]) paddle.save([x,y], 'x-file') x2, y2 = paddle.load('x-file') mydict = {'x': x, 'y': y} paddle.save(mydict, 'mydict') mydict2 = paddle.load('mydict') class MLP(nn.Layer): def __init__(self): super().__init__() self.hidden = nn.Linear(20, 256) self.output = nn.Linear(256, 10) def forward(self, x): return self.output(F.relu(self.hidden(x))) net = MLP() X = paddle.randn(shape=[2, 20]) Y = net(X) paddle.save(net.state_dict(), 'mlp.pdparams') clone = MLP() clone.set_state_dict(paddle.load('mlp.pdparams')) clone.eval()
449
null
import torch from torch import nn torch.device('cpu'), torch.device('cuda'), torch.device('cuda:1') torch.cuda.device_count() def try_gpu(i=0): if torch.cuda.device_count() >= i + 1: return torch.device(f'cuda:{i}') return torch.device('cpu') def try_all_gpus(): devices = [torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())] return devices if devices else [torch.device('cpu')] try_gpu(), try_gpu(10), try_all_gpus() x = torch.tensor([1, 2, 3]) x.device X = torch.ones(2, 3, device=try_gpu()) Y = torch.rand(2, 3, device=try_gpu(1)) net = nn.Sequential(nn.Linear(3, 1)) net = net.to(device=try_gpu()) net[0].weight.data.device
null
import paddle from paddle import nn paddle.device.set_device("cpu"), paddle.CUDAPlace(0), paddle.CUDAPlace(1) paddle.device.cuda.device_count() def try_gpu(i=0): if paddle.device.cuda.device_count() >= i + 1: return paddle.CUDAPlace(i) return paddle.CPUPlace() def try_all_gpus(): devices = [paddle.CUDAPlace(i) for i in range(paddle.device.cuda.device_count())] return devices if devices else paddle.CPUPlace() try_gpu(),try_gpu(10),try_all_gpus() x = paddle.to_tensor([1, 2, 3]) x.place X = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu()) Y = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1)) net = nn.Sequential(nn.Linear(3, 1)) net=net.to(try_gpu()) net[0].weight.place
450
null
import torch from torch import nn from d2l import torch as d2l def corr2d(X, K): h, w = K.shape Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): Y[i, j] = (X[i:i + h, j:j + w] * K).sum() return Y X = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) K = torch.tensor([[0.0, 1.0], [2.0, 3.0]]) corr2d(X, K) class Conv2D(nn.Module): def __init__(self, kernel_size): super().__init__() self.weight = nn.Parameter(torch.rand(kernel_size)) self.bias = nn.Parameter(torch.zeros(1)) def forward(self, x): return corr2d(x, self.weight) + self.bias X = torch.ones((6, 8)) X[:, 2:6] = 0 K = torch.tensor([[1.0, -1.0]]) conv2d = nn.Conv2d(1,1, kernel_size=(1, 2), bias=False) X = X.reshape((1, 1, 6, 8)) Y = Y.reshape((1, 1, 6, 7)) lr = 3e-2 for i in range(10): Y_hat = conv2d(X) l = (Y_hat - Y) ** 2 conv2d.zero_grad() l.sum().backward() conv2d.weight.data[:] -= lr * conv2d.weight.grad conv2d.weight.data.reshape((1, 2))
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn def corr2d(X, K): h, w = K.shape Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): Y[i, j] = (X[i:i + h, j:j + w] * K).sum() return Y X = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) K = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]]) corr2d(X, K) class Conv2D(nn.Layer): def __init__(self, kernel_size): super().__init__() self.weight = paddle.ParamAttr(paddle.rand(kernel_size)) self.bias = paddle.ParamAttr(paddle.zeros(1)) def forward(self, x): return corr2d(x, self.weight) + self.bias X = paddle.ones((6, 8)) X[:, 2:6] = 0 K = paddle.to_tensor([[1.0, -1.0]]) conv2d = nn.Conv2D(1, 1, kernel_size=(1, 2)) X = X.reshape((1, 1, 6, 8)) Y = Y.reshape((1, 1, 6, 7)) lr = 3e-2 for i in range(10): Y_hat = conv2d(X) l = (Y_hat - Y) ** 2 conv2d.clear_gradients() l.sum().backward() with paddle.no_grad(): conv2d.weight[:] -= lr * conv2d.weight.grad conv2d.weight.reshape((1, 2))
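The training loops in both columns fit a (1, 2) kernel to reproduce a target Y that is never constructed in this row; from the surrounding code it is presumably the edge map obtained by applying corr2d with the [1, -1] kernel to the black-and-white stripe image. A minimal reconstruction of that step (an assumption, mirroring the definitions above):

import torch

def corr2d(X, K):
    h, w = K.shape
    Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
    return Y

X = torch.ones((6, 8))
X[:, 2:6] = 0
K = torch.tensor([[1.0, -1.0]])
Y = corr2d(X, K)   # 1 at white-to-black edges, -1 at black-to-white edges, 0 elsewhere
print(Y.shape)     # torch.Size([6, 7]), matching the (1, 1, 6, 7) reshape above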
451
null
import torch from torch import nn def comp_conv2d(conv2d, X): X = X.reshape((1, 1) + X.shape) Y = conv2d(X) return Y.reshape(Y.shape[2:]) conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1) X = torch.rand(size=(8, 8)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2d(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4)) comp_conv2d(conv2d, X).shape
null
import warnings warnings.filterwarnings(action='ignore') import paddle from paddle import nn def comp_conv2d(conv2d, X): X = paddle.reshape(X, [1, 1] + X.shape) Y = conv2d(X) return Y.reshape(Y.shape[2:]) conv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1) X = paddle.rand((8, 8)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, stride=2) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4)) comp_conv2d(conv2d, X).shape
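The shapes produced above follow floor((n - k + p + s) / s) per spatial dimension, where p is the total padding added on both sides and s the stride; a quick arithmetic check against the last conv2d in this row:

def conv_out(n, k, p, s):
    # p is total padding (both sides combined), s is the stride
    return (n - k + p + s) // s

# kernel (3, 5), padding (0, 1) per side, stride (3, 4), on an 8 x 8 input
print(conv_out(8, 3, 0 * 2, 3), conv_out(8, 5, 1 * 2, 4))   # 2 2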
452
null
import torch from d2l import torch as d2l def corr2d_multi_in(X, K): return sum(d2l.corr2d(x, k) for x, k in zip(X, K)) X = torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]) K = torch.tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]]) corr2d_multi_in(X, K) def corr2d_multi_in_out(X, K): return torch.stack([corr2d_multi_in(X, k) for k in K], 0) K = torch.stack((K, K + 1, K + 2), 0) K.shape def corr2d_multi_in_out_1x1(X, K): c_i, h, w = X.shape c_o = K.shape[0] X = X.reshape((c_i, h * w)) K = K.reshape((c_o, c_i)) Y = torch.matmul(K, X) return Y.reshape((c_o, h, w)) X = torch.normal(0, 1, (3, 3, 3)) K = torch.normal(0, 1, (2, 3, 1, 1)) Y1 = corr2d_multi_in_out_1x1(X, K) Y2 = corr2d_multi_in_out(X, K) assert float(torch.abs(Y1 - Y2).sum()) < 1e-6
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle def corr2d_multi_in(X, K): return sum(d2l.corr2d(x, k) for x, k in zip(X, K)) X = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]) K = paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]]) corr2d_multi_in(X, K) def corr2d_multi_in_out(X, K): return paddle.stack([corr2d_multi_in(X, k) for k in K], 0) K = paddle.stack((K, K + 1, K + 2), 0) K.shape def corr2d_multi_in_out_1x1(X, K): c_i, h, w = X.shape c_o = K.shape[0] X = X.reshape((c_i, h * w)) K = K.reshape((c_o, c_i)) Y = paddle.matmul(K, X) return Y.reshape((c_o, h, w)) X = paddle.normal(0, 1, (3, 3, 3)) K = paddle.normal(0, 1, (2, 3, 1, 1)) Y1 = corr2d_multi_in_out_1x1(X, K) Y2 = corr2d_multi_in_out(X, K) assert float(paddle.abs(Y1 - Y2).sum()) < 1e-6
453
null
import torch from torch import nn from d2l import torch as d2l def pool2d(X, pool_size, mode='max'): p_h, p_w = pool_size Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): if mode == 'max': Y[i, j] = X[i: i + p_h, j: j + p_w].max() elif mode == 'avg': Y[i, j] = X[i: i + p_h, j: j + p_w].mean() return Y X = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) pool2d(X, (2, 2)) X = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4)) pool2d = nn.MaxPool2d(3) pool2d(X) pool2d = nn.MaxPool2d(3, padding=1, stride=2) pool2d(X) pool2d = nn.MaxPool2d((2, 3), stride=(2, 3), padding=(0, 1)) pool2d(X) X = torch.cat((X, X + 1), 1) pool2d = nn.MaxPool2d(3, padding=1, stride=2) pool2d(X)
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn def pool2d(X, pool_size, mode='max'): p_h, p_w = pool_size Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): if mode == 'max': Y[i, j] = X[i: i + p_h, j: j + p_w].max() elif mode == 'avg': Y[i, j] = X[i: i + p_h, j: j + p_w].mean() return Y X = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) pool2d(X, (2, 2)) X = paddle.arange(16, dtype="float32").reshape((1, 1, 4, 4)) pool2d = nn.MaxPool2D(3, stride=3) pool2d(X) pool2d = nn.MaxPool2D(3, padding=1, stride=2) pool2d(X) pool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3)) pool2d(X) X = paddle.concat((X, X + 1), 1) pool2d = paddle.nn.MaxPool2D(3, padding=1, stride=2) pool2d(X)
454
null
import torch from torch import nn from d2l import torch as d2l net = nn.Sequential( nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(), nn.Linear(120, 84), nn.Sigmoid(), nn.Linear(84, 10)) X = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ',X.shape) def train_ch6(net, train_iter, test_iter, num_epochs, lr, device): def init_weights(m): if type(m) == nn.Linear or type(m) == nn.Conv2d: nn.init.xavier_uniform_(m.weight) net.apply(init_weights) net.to(device) optimizer = torch.optim.SGD(net.parameters(), lr=lr) loss = nn.CrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc']) timer, num_batches = d2l.Timer(), len(train_iter) for epoch in range(num_epochs): metric = d2l.Accumulator(3) net.train() for i, (X, y) in enumerate(train_iter): timer.start() optimizer.zero_grad() X, y = X.to(device), y.to(device) y_hat = net(X) l = loss(y_hat, y) l.backward() optimizer.step() with torch.no_grad(): metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0]) timer.stop() train_l = metric[0] / metric[2] train_acc = metric[1] / metric[2] if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1: animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None)) test_acc = evaluate_accuracy_gpu(net, test_iter) animator.add(epoch + 1, (None, None, test_acc))
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn, optimizer net = nn.Sequential( nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(), nn.AvgPool2D(kernel_size=2, stride=2), nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(), nn.AvgPool2D(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(), nn.Linear(120, 84), nn.Sigmoid(), nn.Linear(84, 10)) X = paddle.rand((1, 1, 28, 28), 'float32') for layer in net: X = layer(X) print(layer.__class__.__name__, 'output shape: ', X.shape) def train_ch6(net, train_iter, test_iter, num_epochs, lr, device): def init_weights(m): if type(m) == nn.Linear or type(m) == nn.Conv2D: nn.initializer.XavierUniform(m.weight) net.apply(init_weights) net.to(device) optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters()) loss = nn.CrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc']) timer, num_batches = d2l.Timer(), len(train_iter) for epoch in range(num_epochs): metric = d2l.Accumulator(3) net.train() for i, (X, y) in enumerate(train_iter): timer.start() optimizer.clear_grad() X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device) y_hat = net(X) l = loss(y_hat, y) l.backward() optimizer.step() with paddle.no_grad(): metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0]) timer.stop() train_l = metric[0] / metric[2] train_acc = metric[1] / metric[2] if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1: animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None)) test_acc = evaluate_accuracy_gpu(net, test_iter) animator.add(epoch + 1, (None, None, test_acc))
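Both training cells above call evaluate_accuracy_gpu without defining it. A minimal sketch of such a helper, in the PyTorch style of the cell and assuming the d2l Accumulator and accuracy utilities, might look like:

def evaluate_accuracy_gpu(net, data_iter, device=None):
    # Compute accuracy on a dataset, moving each minibatch to the model's device.
    if isinstance(net, nn.Module):
        net.eval()  # switch to evaluation mode
        if not device:
            device = next(iter(net.parameters())).device
    metric = d2l.Accumulator(2)  # (number of correct predictions, number of examples)
    with torch.no_grad():
        for X, y in data_iter:
            X, y = X.to(device), y.to(device)
            metric.add(d2l.accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]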
455
null
import torch from torch import nn from d2l import torch as d2l net = nn.Sequential( nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2), nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2), nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2), nn.Flatten(), nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, 10)) X = torch.randn(1, 1, 224, 224) for layer in net: X=layer(X) print(layer.__class__.__name__,'output shape: ',X.shape)
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn net = nn.Sequential( nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2), nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2), nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(), nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, 10)) X = paddle.randn(shape=(1, 1, 224, 224)) for layer in net: X=layer(X) print(layer.__class__.__name__,'output shape: ',X.shape)
456
null
import torch from torch import nn from d2l import torch as d2l def vgg_block(num_convs, in_channels, out_channels): layers = [] for _ in range(num_convs): layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)) layers.append(nn.ReLU()) in_channels = out_channels layers.append(nn.MaxPool2d(kernel_size=2,stride=2)) return nn.Sequential(*layers) def vgg(conv_arch): conv_blks = [] in_channels = 1 for (num_convs, out_channels) in conv_arch: conv_blks.append(vgg_block(num_convs, in_channels, out_channels)) in_channels = out_channels return nn.Sequential( *conv_blks, nn.Flatten(), nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5), nn.Linear(4096, 10)) net = vgg(conv_arch) X = torch.randn(size=(1, 1, 224, 224)) for blk in net: X = blk(X) print(blk.__class__.__name__,'output shape: ',X.shape)
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def vgg_block(num_convs, in_channels, out_channels): layers = [] for _ in range(num_convs): layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1)) layers.append(nn.ReLU()) in_channels = out_channels layers.append(nn.MaxPool2D(kernel_size=2, stride=2)) return nn.Sequential(*layers) def vgg(conv_arch): conv_blks = [] in_channels = 1 for (num_convs, out_channels) in conv_arch: conv_blks.append(vgg_block(num_convs, in_channels, out_channels)) in_channels = out_channels return nn.Sequential(*conv_blks, nn.Flatten(), nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5), nn.Linear(4096, 10)) net = vgg(conv_arch) X = paddle.randn(shape=(1, 1, 224, 224)) for blk in net: X = blk(X) print(blk.__class__.__name__,'output shape: ',X.shape)
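Both VGG cells call vgg(conv_arch) without defining conv_arch. A plausible value (an assumption, not shown in the cells) is the five-block VGG-11 configuration, which is consistent with the out_channels * 7 * 7 fully connected layer on 224x224 inputs:

conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))  # (num_convs, out_channels) per block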
457
null
import torch from torch import nn from d2l import torch as d2l def nin_block(in_channels, out_channels, kernel_size, strides, padding): return nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding), nn.ReLU(), nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(), nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU()) net = nn.Sequential( nin_block(1, 96, kernel_size=11, strides=4, padding=0), nn.MaxPool2d(3, stride=2), nin_block(96, 256, kernel_size=5, strides=1, padding=2), nn.MaxPool2d(3, stride=2), nin_block(256, 384, kernel_size=3, strides=1, padding=1), nn.MaxPool2d(3, stride=2), nn.Dropout(0.5), nin_block(384, 10, kernel_size=3, strides=1, padding=1), nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten()) X = torch.rand(size=(1, 1, 224, 224)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def nin_block(in_channels, out_channels, kernel_size, strides, padding): return nn.Sequential( nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding), nn.ReLU(), nn.Conv2D(out_channels, out_channels, kernel_size=1), nn.ReLU(), nn.Conv2D(out_channels, out_channels, kernel_size=1), nn.ReLU()) net = nn.Sequential( nin_block(1, 96, kernel_size=11, strides=4, padding=0), nn.MaxPool2D(3, stride=2), nin_block(96, 256, kernel_size=5, strides=1, padding=2), nn.MaxPool2D(3, stride=2), nin_block(256, 384, kernel_size=3, strides=1, padding=1), nn.MaxPool2D(3, stride=2), nn.Dropout(0.5), nin_block(384, 10, kernel_size=3, strides=1, padding=1), nn.AdaptiveAvgPool2D((1, 1)), nn.Flatten()) X = paddle.rand(shape=(1, 1, 224, 224)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
458
null
import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l class Inception(nn.Module): def __init__(self, in_channels, c1, c2, c3, c4, **kwargs): super(Inception, self).__init__(**kwargs) self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1) self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1) self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1) self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1) self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2) self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1) def forward(self, x): p1 = F.relu(self.p1_1(x)) p2 = F.relu(self.p2_2(F.relu(self.p2_1(x)))) p3 = F.relu(self.p3_2(F.relu(self.p3_1(x)))) p4 = F.relu(self.p4_2(self.p4_1(x))) return torch.cat((p1, p2, p3, p4), dim=1) b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) b2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1), nn.ReLU(), nn.Conv2d(64, 192, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32), Inception(256, 128, (128, 192), (32, 96), 64), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64), Inception(512, 160, (112, 224), (24, 64), 64), Inception(512, 128, (128, 256), (24, 64), 64), Inception(512, 112, (144, 288), (32, 64), 64), Inception(528, 256, (160, 320), (32, 128), 128), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128), Inception(832, 384, (192, 384), (48, 128), 128), nn.AdaptiveAvgPool2d((1,1)), nn.Flatten()) net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10)) X = torch.rand(size=(1, 1, 96, 96)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn import paddle.nn.functional as F class Inception(nn.Layer): def __init__(self, in_channels, c1, c2, c3, c4, **kwargs): super(Inception, self).__init__(**kwargs) self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1) self.p2_1 = nn.Conv2D(in_channels, c2[0], kernel_size=1) self.p2_2 = nn.Conv2D(c2[0], c2[1], kernel_size=3, padding=1) self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1) self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2) self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1) self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1) def forward(self, x): p1 = F.relu(self.p1_1(x)) p2 = F.relu(self.p2_2(F.relu(self.p2_1(x)))) p3 = F.relu(self.p3_2(F.relu(self.p3_1(x)))) p4 = F.relu(self.p4_2(self.p4_1(x))) return paddle.concat(x=[p1, p2, p3, p4], axis=1) b1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2,padding=1)) b2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1), nn.ReLU(), nn.Conv2D(64, 192, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32), Inception(256, 128, (128, 192), (32, 96), 64), nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64), Inception(512, 160, (112, 224), (24, 64), 64), Inception(512, 128, (128, 256), (24, 64), 64), Inception(512, 112, (144, 288), (32, 64), 64), Inception(528, 256, (160, 320), (32, 128), 128), nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128), Inception(832, 384, (192, 384), (48, 128), 128), nn.AdaptiveAvgPool2D((1, 1)), nn.Flatten()) net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10)) X = paddle.rand(shape=(1, 1, 96, 96)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
459
null
import torch from torch import nn from d2l import torch as d2l def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum): if not torch.is_grad_enabled(): X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps) else: assert len(X.shape) in (2, 4) if len(X.shape) == 2: mean = X.mean(dim=0) var = ((X - mean) ** 2).mean(dim=0) else: mean = X.mean(dim=(0, 2, 3), keepdim=True) var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True) X_hat = (X - mean) / torch.sqrt(var + eps) moving_mean = momentum * moving_mean + (1.0 - momentum) * mean moving_var = momentum * moving_var + (1.0 - momentum) * var Y = gamma * X_hat + beta return Y, moving_mean.data, moving_var.data class BatchNorm(nn.Module): def __init__(self, num_features, num_dims): super().__init__() if num_dims == 2: shape = (1, num_features) else: shape = (1, num_features, 1, 1) self.gamma = nn.Parameter(torch.ones(shape)) self.beta = nn.Parameter(torch.zeros(shape)) self.moving_mean = torch.zeros(shape) self.moving_var = torch.ones(shape) def forward(self, X): if self.moving_mean.device != X.device: self.moving_mean = self.moving_mean.to(X.device) self.moving_var = self.moving_var.to(X.device) Y, self.moving_mean, self.moving_var = batch_norm( X, self.gamma, self.beta, self.moving_mean, self.moving_var, eps=1e-5, momentum=0.9) return Y net = nn.Sequential( nn.Conv2d(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Conv2d(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(16*4*4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(), nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(), nn.Linear(84, 10)) net[1].gamma.reshape((-1,)), net[1].beta.reshape((-1,)) net = nn.Sequential( nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(), nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(), nn.Linear(84, 10))
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True): if not is_training: X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5 else: assert len(X.shape) in (2, 4) if len(X.shape) == 2: mean = paddle.mean(X) var = paddle.mean(((X - mean) ** 2)) else: mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True) var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True) X_hat = (X - mean) / (var + eps) ** 0.5 moving_mean = momentum * moving_mean + (1.0 - momentum) * mean moving_var = momentum * moving_var + (1.0 - momentum) * var Y = gamma * X_hat + beta return Y, moving_mean, moving_var class BatchNorm(nn.Layer): def __init__(self, num_features, num_dims=4): super(BatchNorm, self).__init__() if num_dims == 2: shape = (1, num_features) else: shape = (1, num_features, 1, 1) self.gamma = self.create_parameter( attr=None, shape=shape, dtype='float32', is_bias=False, default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32'))) self.beta = self.create_parameter( attr=None, shape=shape, dtype='float32', is_bias=False, default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32'))) self.moving_mean = paddle.zeros(shape=shape, dtype='float32') self.moving_var = paddle.zeros(shape=shape, dtype='float32') def forward(self, X): Y, self.moving_mean, self.moving_var = batch_norm( X, self.gamma, self.beta, self.moving_mean, self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training) return Y net = nn.Sequential( nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(), nn.MaxPool2D(kernel_size=2, stride=2), nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(), nn.MaxPool2D(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(), nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(), nn.Linear(84, 10)) param = net.parameters() print('gamma:', param[2].numpy().reshape(-1)) print('beta:', param[3].numpy().reshape(-1)) net = nn.Sequential( nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(), nn.MaxPool2D(kernel_size=2, stride=2), nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(), nn.MaxPool2D(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(), nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(), nn.Linear(84, 10))
460
null
import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l class Residual(nn.Module): def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1): super().__init__() self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides) self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1) if use_1x1conv: self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides) else: self.conv3 = None self.bn1 = nn.BatchNorm2d(num_channels) self.bn2 = nn.BatchNorm2d(num_channels) def forward(self, X): Y = F.relu(self.bn1(self.conv1(X))) Y = self.bn2(self.conv2(Y)) if self.conv3: X = self.conv3(X) Y += X return F.relu(Y) blk = Residual(3,3) X = torch.rand(4, 3, 6, 6) Y = blk(X) Y.shape blk = Residual(3,6, use_1x1conv=True, strides=2) blk(X).shape b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3), nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) net = nn.Sequential(b1, b2, b3, b4, b5, nn.AdaptiveAvgPool2d((1,1)), nn.Flatten(), nn.Linear(512, 10)) X = torch.rand(size=(1, 1, 224, 224)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn from paddle.nn import functional as F class Residual(nn.Layer): def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1): super(Residual, self).__init__() self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides) self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1) if use_1x1conv: self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides) else: self.conv3 = None self.bn1 = nn.BatchNorm2D(num_channels) self.bn2 = nn.BatchNorm2D(num_channels) self.relu = nn.ReLU() def forward(self, X): Y = F.relu(self.bn1(self.conv1(X))) Y = self.bn2(self.conv2(Y)) if self.conv3: X = self.conv3(X) Y += X return F.relu(Y) blk = Residual(3, 3) X = paddle.rand([4, 3, 6, 6]) Y = blk(X) Y.shape blk = Residual(3, 6, use_1x1conv=True, strides=2) blk(X).shape b1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3), nn.BatchNorm2D(64), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) net = nn.Sequential(b1, b2, b3, b4, b5, nn.AdaptiveAvgPool2D((1, 1)), nn.Flatten(), nn.Linear(512, 10)) X = paddle.rand(shape=(1, 1, 224, 224)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
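Both ResNet cells assemble nn.Sequential(b1, b2, b3, b4, b5, ...) but never define b2 through b5. A sketch of how those stages are presumably built from the Residual block defined above (PyTorch-flavored; the Paddle version is analogous):

def resnet_block(input_channels, num_channels, num_residuals, first_block=False):
    # One stage of residual blocks; the first block of each later stage halves
    # height/width and changes the channel count via a 1x1 convolution shortcut.
    blk = []
    for i in range(num_residuals):
        if i == 0 and not first_block:
            blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))
        else:
            blk.append(Residual(num_channels, num_channels))
    return blk

b2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))
b3 = nn.Sequential(*resnet_block(64, 128, 2))
b4 = nn.Sequential(*resnet_block(128, 256, 2))
b5 = nn.Sequential(*resnet_block(256, 512, 2))  # 512 channels feed nn.Linear(512, 10)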
461
null
import torch from torch import nn from d2l import torch as d2l def conv_block(input_channels, num_channels): return nn.Sequential( nn.BatchNorm2d(input_channels), nn.ReLU(), nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1)) class DenseBlock(nn.Module): def __init__(self, num_convs, input_channels, num_channels): super(DenseBlock, self).__init__() layer = [] for i in range(num_convs): layer.append(conv_block(num_channels * i + input_channels, num_channels)) self.net = nn.Sequential(*layer) def forward(self, X): for blk in self.net: Y = blk(X) X = torch.cat((X, Y), dim=1) return X blk = DenseBlock(2, 3, 10) X = torch.randn(4, 3, 8, 8) Y = blk(X) Y.shape def transition_block(input_channels, num_channels): return nn.Sequential( nn.BatchNorm2d(input_channels), nn.ReLU(), nn.Conv2d(input_channels, num_channels, kernel_size=1), nn.AvgPool2d(kernel_size=2, stride=2)) b1 = nn.Sequential( nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3), nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) net = nn.Sequential( b1, *blks, nn.BatchNorm2d(num_channels), nn.ReLU(), nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten(), nn.Linear(num_channels, 10))
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def conv_block(input_channels, num_channels): return nn.Sequential( nn.BatchNorm2D(input_channels), nn.ReLU(), nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1)) class DenseBlock(nn.Layer): def __init__(self, num_convs, input_channels, num_channels): super(DenseBlock, self).__init__() layer = [] for i in range(num_convs): layer.append(conv_block(num_channels * i + input_channels, num_channels)) self.net = nn.Sequential(*layer) def forward(self, X): for blk in self.net: Y = blk(X) X = paddle.concat(x=[X, Y], axis=1) return X blk = DenseBlock(2, 3, 10) X = paddle.randn([4, 3, 8, 8]) Y = blk(X) Y.shape def transition_block(input_channels, num_channels): return nn.Sequential( nn.BatchNorm2D(input_channels), nn.ReLU(), nn.Conv2D(input_channels, num_channels, kernel_size=1), nn.AvgPool2D(kernel_size=2, stride=2)) b1 = nn.Sequential( nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3), nn.BatchNorm2D(64), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) net = nn.Sequential( b1, *blks, nn.BatchNorm2D(num_channels), nn.ReLU(), nn.AdaptiveMaxPool2D((1, 1)), nn.Flatten(), nn.Linear(num_channels, 10))
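The DenseNet cells reference blks and num_channels without constructing them. One plausible construction (an assumption, consistent with the nn.Linear(num_channels, 10) head) uses four dense blocks with growth rate 32 and a transition block between consecutive dense blocks:

num_channels, growth_rate = 64, 32
num_convs_in_dense_blocks = [4, 4, 4, 4]
blks = []
for i, num_convs in enumerate(num_convs_in_dense_blocks):
    blks.append(DenseBlock(num_convs, num_channels, growth_rate))
    num_channels += num_convs * growth_rate  # each conv layer adds growth_rate channels
    if i != len(num_convs_in_dense_blocks) - 1:
        blks.append(transition_block(num_channels, num_channels // 2))  # halve channels
        num_channels = num_channels // 2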
462
null
%matplotlib inline import torch from torch import nn from d2l import torch as d2l T = 1000 time = torch.arange(1, T + 1, dtype=torch.float32) x = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,)) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) tau = 4 features = torch.zeros((T - tau, tau)) for i in range(tau): features[:, i] = x[i: T - tau + i] labels = x[tau:].reshape((-1, 1)) batch_size, n_train = 16, 600 train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True) def init_weights(m): if type(m) == nn.Linear: nn.init.xavier_uniform_(m.weight) def get_net(): net = nn.Sequential(nn.Linear(4, 10), nn.ReLU(), nn.Linear(10, 1)) net.apply(init_weights) return net loss = nn.MSELoss(reduction='none') def train(net, train_iter, loss, epochs, lr): trainer = torch.optim.Adam(net.parameters(), lr) for epoch in range(epochs): for X, y in train_iter: trainer.zero_grad() l = loss(net(X), y) l.sum().backward() trainer.step() net = get_net() train(net, train_iter, loss, 5, 0.01) multistep_preds = torch.zeros(T) multistep_preds[: n_train + tau] = x[: n_train + tau] for i in range(n_train + tau, T): multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1))) d2l.plot([time, time[tau:], time[n_train + tau:]], [x.detach().numpy(), onestep_preds.detach().numpy(), multistep_preds[n_train + tau:].detach().numpy()], 'time', 'x', legend=['data', '1-step preds', 'multistep preds'], xlim=[1, 1000], figsize=(6, 3)) max_steps = 64 features = torch.zeros((T - tau - max_steps + 1, tau + max_steps)) for i in range(tau): features[:, i] = x[i: i + T - tau - max_steps + 1] for i in range(tau, tau + max_steps): features[:, i] = net(features[:, i - tau:i]).reshape(-1) steps = (1, 4, 16, 64) d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps], [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x', legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000], figsize=(6, 3))
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn T = 1000 time = paddle.arange(1, T + 1, dtype=paddle.float32) x = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,)) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) tau = 4 features = paddle.zeros((T - tau, tau)) for i in range(tau): features[:, i] = x[i: T - tau + i] labels = x[tau:].reshape((-1, 1)) batch_size, n_train = 16, 600 train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True) def init_weights(m): if type(m) == nn.Linear: nn.initializer.XavierUniform(m.weight) def get_net(): net = nn.Sequential(nn.Linear(4, 10), nn.ReLU(), nn.Linear(10, 1)) net.apply(init_weights) return net loss = nn.MSELoss(reduction='none') def train(net, train_iter, loss, epochs, lr): trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters()) for epoch in range(epochs): for i,(X, y) in enumerate (train_iter()): trainer.clear_grad() l = loss(net(X), y) l.sum().backward() trainer.step() net = get_net() train(net, train_iter, loss, 5, 0.01) multistep_preds = paddle.zeros([T]) multistep_preds[: n_train + tau] = x[: n_train + tau] for i in range(n_train + tau, T): multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1))) d2l.plot([time, time[tau:], time[n_train + tau:]], [x.detach().numpy(), onestep_preds.detach().numpy(), multistep_preds[n_train + tau:].detach().numpy()], 'time', 'x', legend=['data', '1-step preds', 'multistep preds'], xlim=[1, 1000], figsize=(6, 3)) max_steps = 64 features = paddle.zeros((T - tau - max_steps + 1, tau + max_steps)) for i in range(tau): features[:, i] = x[i: i + T - tau - max_steps + 1] for i in range(tau, tau + max_steps): features[:, i] = net(features[:, i - tau:i]).reshape([-1]) steps = (1, 4, 16, 64) d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps], [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x', legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000], figsize=(6, 3))
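Both sequence-model cells plot onestep_preds without computing it. Presumably it holds the one-step-ahead predictions over all feature windows, i.e. something like:

onestep_preds = net(features)  # predict x_t from the preceding tau observations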
463
null
import collections import re from d2l import torch as d2l
null
import collections import re from d2l import paddle as d2l
464
null
import random import torch from d2l import torch as d2l tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): corpus = corpus[random.randint(0, num_steps - 1):] num_subseqs = (len(corpus) - 1) // num_steps initial_indices = list(range(0, num_subseqs * num_steps, num_steps)) random.shuffle(initial_indices) def data(pos): return corpus[pos: pos + num_steps] num_batches = num_subseqs // batch_size for i in range(0, batch_size * num_batches, batch_size): initial_indices_per_batch = initial_indices[i: i + batch_size] X = [data(j) for j in initial_indices_per_batch] Y = [data(j + 1) for j in initial_indices_per_batch] yield torch.tensor(X), torch.tensor(Y) def seq_data_iter_sequential(corpus, batch_size, num_steps): offset = random.randint(0, num_steps) num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size Xs = torch.tensor(corpus[offset: offset + num_tokens]) Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens]) Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1) num_batches = Xs.shape[1] // num_steps for i in range(0, num_steps * num_batches, num_steps): X = Xs[:, i: i + num_steps] Y = Ys[:, i: i + num_steps] yield X, Y
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import random import paddle tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): corpus = corpus[random.randint(0, num_steps - 1):] num_subseqs = (len(corpus) - 1) // num_steps initial_indices = list(range(0, num_subseqs * num_steps, num_steps)) random.shuffle(initial_indices) def data(pos): return corpus[pos: pos + num_steps] num_batches = num_subseqs // batch_size for i in range(0, batch_size * num_batches, batch_size): initial_indices_per_batch = initial_indices[i: i + batch_size] X = [data(j) for j in initial_indices_per_batch] Y = [data(j + 1) for j in initial_indices_per_batch] yield paddle.to_tensor(X), paddle.to_tensor(Y) def seq_data_iter_sequential(corpus, batch_size, num_steps): offset = random.randint(0, num_steps) num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size Xs = paddle.to_tensor(corpus[offset: offset + num_tokens]) Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens]) Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1)) num_batches = Xs.shape[1] // num_steps for i in range(0, num_steps * num_batches, num_steps): X = Xs[:, i: i + num_steps] Y = Ys[:, i: i + num_steps] yield X, Y
465
null
import torch from d2l import torch as d2l X, W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4)) H, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4)) torch.matmul(X, W_xh) + torch.matmul(H, W_hh) torch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle X, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4)) H, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4)) paddle.matmul(X, W_xh) + paddle.matmul(H, W_hh) paddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))
466
null
%matplotlib inline import math import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) F.one_hot(torch.tensor([0, 2]), len(vocab)) X = torch.arange(10).reshape((2, 5)) F.one_hot(X.T, 28).shape def get_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return torch.randn(size=shape, device=device) * 0.01 W_xh = normal((num_inputs, num_hiddens)) W_hh = normal((num_hiddens, num_hiddens)) b_h = torch.zeros(num_hiddens, device=device) W_hq = normal((num_hiddens, num_outputs)) b_q = torch.zeros(num_outputs, device=device) params = [W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.requires_grad_(True) return params def init_rnn_state(batch_size, num_hiddens, device): return (torch.zeros((batch_size, num_hiddens), device=device), ) def rnn(inputs, state, params): W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h) Y = torch.mm(H, W_hq) + b_q outputs.append(Y) return torch.cat(outputs, dim=0), (H,) class RNNModelScratch: def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn): self.vocab_size, self.num_hiddens = vocab_size, num_hiddens self.params = get_params(vocab_size, num_hiddens, device) self.init_state, self.forward_fn = init_state, forward_fn def __call__(self, X, state): X = F.one_hot(X.T, self.vocab_size).type(torch.float32) return self.forward_fn(X, state, self.params) def begin_state(self, batch_size, device): return self.init_state(batch_size, self.num_hiddens, device) num_hiddens = 512 net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn) state = net.begin_state(X.shape[0], d2l.try_gpu()) Y, new_state = net(X.to(d2l.try_gpu()), state) Y.shape, len(new_state), new_state[0].shape def predict_ch8(prefix, num_preds, net, vocab, device): state = net.begin_state(batch_size=1, device=device) outputs = [vocab[prefix[0]]] get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1)) for y in prefix[1:]: _, state = net(get_input(), state) outputs.append(vocab[y]) for _ in range(num_preds): y, state = net(get_input(), state) outputs.append(int(y.argmax(dim=1).reshape(1))) return ''.join([vocab.idx_to_token[i] for i in outputs]) def grad_clipping(net, theta): if isinstance(net, nn.Module): params = [p for p in net.parameters() if p.requires_grad] else: params = net.params norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params)) if norm > theta: for param in params: param.grad[:] *= theta / norm def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter): state, timer = None, d2l.Timer() metric = d2l.Accumulator(2) for X, Y in train_iter: if state is None or use_random_iter: state = net.begin_state(batch_size=X.shape[0], device=device) else: if isinstance(net, nn.Module) and not isinstance(state, tuple): state.detach_() else: for s in state: s.detach_() y = Y.T.reshape(-1) X, y = X.to(device), y.to(device) y_hat, state = net(X, state) l = loss(y_hat, y.long()).mean() if isinstance(updater, torch.optim.Optimizer): updater.zero_grad() l.backward() grad_clipping(net, 1) updater.step() else: l.backward() grad_clipping(net, 1) updater(batch_size=1) metric.add(l * y.numel(), y.numel()) return math.exp(metric[0] / metric[1]), metric[1] / timer.stop() def train_ch8(net, train_iter, vocab, lr, num_epochs, device, 
use_random_iter=False): loss = nn.CrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs]) if isinstance(net, nn.Module): updater = torch.optim.SGD(net.parameters(), lr) else: updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size) predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device) for epoch in range(num_epochs): ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter) if (epoch + 1) % 10 == 0: animator.add(epoch + 1, [ppl]) net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn) train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import math import paddle from paddle import nn from paddle.nn import functional as F batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) F.one_hot(paddle.to_tensor([0, 2]), len(vocab)) X = paddle.arange(10).reshape((2, 5)) F.one_hot(X.T, 28).shape def get_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return paddle.randn(shape=shape)* 0.01 W_xh = normal([num_inputs, num_hiddens]) W_hh = normal([num_hiddens, num_hiddens]) b_h = paddle.zeros(shape=[num_hiddens]) W_hq = normal([num_hiddens, num_outputs]) b_q = paddle.zeros(shape=[num_outputs]) params = [W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.stop_gradient=False return params def init_rnn_state(batch_size, num_hiddens): return (paddle.zeros(shape=[batch_size, num_hiddens]), ) def rnn(inputs, state, params): W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: H = paddle.tanh(paddle.mm(X, W_xh) + paddle.mm(H, W_hh) + b_h) Y = paddle.mm(H, W_hq) + b_q outputs.append(Y) return paddle.concat(x=outputs, axis=0), (H,) class RNNModelScratch: def __init__(self, vocab_size, num_hiddens, get_params, init_state, forward_fn): self.vocab_size, self.num_hiddens = vocab_size, num_hiddens self.params = get_params(vocab_size, num_hiddens) self.init_state, self.forward_fn = init_state, forward_fn def __call__(self, X, state): X = F.one_hot(X.T, self.vocab_size) return self.forward_fn(X, state, self.params) def begin_state(self, batch_size): return self.init_state(batch_size, self.num_hiddens) num_hiddens = 512 net = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn) state = net.begin_state(X.shape[0]) Y, new_state = net(X, state) Y.shape, len(new_state), new_state[0].shape def predict_ch8(prefix, num_preds, net, vocab, device): state = net.begin_state(batch_size=1) outputs = [vocab[prefix[0]]] get_input = lambda: paddle.to_tensor(outputs[-1], place=device).reshape((1, 1)) for y in prefix[1:]: _, state = net(get_input(), state) outputs.append(vocab[y]) for _ in range(num_preds): y, state = net(get_input(), state) outputs.append(int(paddle.reshape(paddle.argmax(y,axis=1),shape=[1]))) return ''.join([vocab.idx_to_token[i] for i in outputs]) def grad_clipping(net, theta): if isinstance(net, nn.Layer): params = [p for p in net.parameters() if not p.stop_gradient] else: params = net.params norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params)) if norm > theta: with paddle.no_grad(): for param in params: param.grad.set_value(param.grad * theta / norm) def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter): state, timer = None, d2l.Timer() metric = d2l.Accumulator(2) for X, Y in train_iter: if state is None or use_random_iter: state = net.begin_state(batch_size=X.shape[0]) else: if isinstance(net, nn.Layer) and not isinstance(state, tuple): state.stop_gradient=True else: for s in state: s.stop_gradient=True y = paddle.reshape(Y.T,shape=[-1]) X = paddle.to_tensor(X, place=device) y = paddle.to_tensor(y, place=device) y_hat, state = net(X, state) l = loss(y_hat, y).mean() if isinstance(updater, paddle.optimizer.Optimizer): updater.clear_grad() l.backward() grad_clipping(net, 1) updater.step() else: l.backward() grad_clipping(net, 1) updater(batch_size=1) metric.add(l * y.numel(), y.numel()) return math.exp(metric[0] / metric[1]), metric[1] / timer.stop() def train_ch8(net, train_iter, 
vocab, lr, num_epochs, device, use_random_iter=False): loss = nn.CrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs]) if isinstance(net, nn.Layer): updater = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters()) else: updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size) predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device) for epoch in range(num_epochs): ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter) if (epoch + 1) % 10 == 0: animator.add(epoch + 1, [ppl]) net = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn) train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)
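The final train_ch8 calls in both cells use num_epochs and lr that are never set in the cell; a typical choice (an assumption) would be:

num_epochs, lr = 500, 1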
467
null
import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_layer = nn.RNN(len(vocab), num_hiddens) state = torch.zeros((1, batch_size, num_hiddens)) state.shape X = torch.rand(size=(num_steps, batch_size, len(vocab))) Y, state_new = rnn_layer(X, state) Y.shape, state_new.shape class RNNModel(nn.Module): def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.num_hiddens = self.rnn.hidden_size if not self.rnn.bidirectional: self.num_directions = 1 self.linear = nn.Linear(self.num_hiddens, self.vocab_size) else: self.num_directions = 2 self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size) def forward(self, inputs, state): X = F.one_hot(inputs.T.long(), self.vocab_size) X = X.to(torch.float32) Y, state = self.rnn(X, state) output = self.linear(Y.reshape((-1, Y.shape[-1]))) return output, state def begin_state(self, device, batch_size=1): if not isinstance(self.rnn, nn.LSTM): return torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device) else: return (torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device), torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device)) device = d2l.try_gpu() net = RNNModel(rnn_layer, vocab_size=len(vocab)) net = net.to(device) d2l.predict_ch8('time traveller', 10, net, vocab, device) num_epochs, lr = 500, 1 d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn from paddle.nn import functional as F batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True) state = paddle.zeros(shape=[1, batch_size, num_hiddens]) state.shape X = paddle.rand(shape=[num_steps, batch_size, len(vocab)]) Y, state_new = rnn_layer(X, state) Y.shape, state_new.shape class RNNModel(nn.Layer): def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.num_hiddens = self.rnn.hidden_size if self.rnn.num_directions==1: self.num_directions = 1 self.linear = nn.Linear(self.num_hiddens, self.vocab_size) else: self.num_directions = 2 self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size) def forward(self, inputs, state): X = F.one_hot(inputs.T, self.vocab_size) Y, state = self.rnn(X, state) output = self.linear(Y.reshape((-1, Y.shape[-1]))) return output, state def begin_state(self, batch_size=1): if not isinstance(self.rnn, nn.LSTM): return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]) else: return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]), paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens])) device = d2l.try_gpu() net = RNNModel(rnn_layer, vocab_size=len(vocab)) d2l.predict_ch8('time traveller', 10, net, vocab, device) num_epochs, lr = 500, 1.0 d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)
468
null
import torch from torch import nn from d2l import torch as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return torch.randn(size=shape, device=device)*0.01 def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device)) W_xz, W_hz, b_z = three() W_xr, W_hr, b_r = three() W_xh, W_hh, b_h = three() W_hq = normal((num_hiddens, num_outputs)) b_q = torch.zeros(num_outputs, device=device) params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.requires_grad_(True) return params def init_gru_state(batch_size, num_hiddens, device): return (torch.zeros((batch_size, num_hiddens), device=device), ) def gru(inputs, state, params): W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: Z = torch.sigmoid((X @ W_xz) + (H @ W_hz) + b_z) R = torch.sigmoid((X @ W_xr) + (H @ W_hr) + b_r) H_tilda = torch.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h) H = Z * H + (1 - Z) * H_tilda Y = H @ W_hq + b_q outputs.append(Y) return torch.cat(outputs, dim=0), (H,) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1 model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params, init_gru_state, gru) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) num_inputs = vocab_size gru_layer = nn.GRU(num_inputs, num_hiddens) model = d2l.RNNModel(gru_layer, len(vocab)) model = model.to(device) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn.functional as F from paddle import nn batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return paddle.randn(shape=shape)*0.01 def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens])) W_xz, W_hz, b_z = three() W_xr, W_hr, b_r = three() W_xh, W_hh, b_h = three() W_hq = normal((num_hiddens, num_outputs)) b_q = paddle.zeros([num_outputs]) params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.stop_gradient = False return params def init_gru_state(batch_size, num_hiddens): return (paddle.zeros([batch_size, num_hiddens]), ) def gru(inputs, state, params): W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params H,*_ = state outputs = [] for X in inputs: Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z) R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r) H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h) H = Z * H + (1 - Z) * H_tilda Y = H @ W_hq + b_q outputs.append(Y) return paddle.concat(outputs, axis=0), (H,*_) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1.0 model = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params, init_gru_state, gru) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) num_inputs = vocab_size gru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True) model = d2l.RNNModel(gru_layer, len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
469
null
import torch from torch import nn from d2l import torch as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_lstm_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return torch.randn(size=shape, device=device)*0.01 def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device)) W_xi, W_hi, b_i = three() W_xf, W_hf, b_f = three() W_xo, W_ho, b_o = three() W_xc, W_hc, b_c = three() W_hq = normal((num_hiddens, num_outputs)) b_q = torch.zeros(num_outputs, device=device) params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] for param in params: param.requires_grad_(True) return params def init_lstm_state(batch_size, num_hiddens, device): return (torch.zeros((batch_size, num_hiddens), device=device), torch.zeros((batch_size, num_hiddens), device=device)) def lstm(inputs, state, params): [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] = params (H, C) = state outputs = [] for X in inputs: I = torch.sigmoid((X @ W_xi) + (H @ W_hi) + b_i) F = torch.sigmoid((X @ W_xf) + (H @ W_hf) + b_f) O = torch.sigmoid((X @ W_xo) + (H @ W_ho) + b_o) C_tilda = torch.tanh((X @ W_xc) + (H @ W_hc) + b_c) C = F * C + I * C_tilda H = O * torch.tanh(C) Y = (H @ W_hq) + b_q outputs.append(Y) return torch.cat(outputs, dim=0), (H, C) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1 model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params, init_lstm_state, lstm) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) num_inputs = vocab_size lstm_layer = nn.LSTM(num_inputs, num_hiddens) model = d2l.RNNModel(lstm_layer, len(vocab)) model = model.to(device) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn.functional as Function from paddle import nn batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_lstm_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return paddle.randn(shape=shape)*0.01 def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens])) W_xi, W_hi, b_i = three() W_xf, W_hf, b_f = three() W_xo, W_ho, b_o = three() W_xc, W_hc, b_c = three() W_hq = normal((num_hiddens, num_outputs)) b_q = paddle.zeros([num_outputs]) params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] for param in params: param.stop_gradient = False return params def init_lstm_state(batch_size, num_hiddens): return (paddle.zeros([batch_size, num_hiddens]), paddle.zeros([batch_size, num_hiddens])) def lstm(inputs, state, params): [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] = params (H, C) = state outputs = [] for X in inputs: I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i) F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f) O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o) C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c) C = F * C + I * C_tilda H = O * paddle.tanh(C) Y = (H @ W_hq) + b_q outputs.append(Y) return paddle.concat(outputs, axis=0), (H, C) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1.0 model = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params, init_lstm_state, lstm) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) num_inputs = vocab_size lstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True) model = d2l.RNNModel(lstm_layer, len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
470
null
import os import torch from d2l import torch as d2l def build_array_nmt(lines, vocab, num_steps): lines = [vocab[l] for l in lines] lines = [l + [vocab['<eos>']] for l in lines] array = torch.tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines]) valid_len = (array != vocab['<pad>']).type(torch.int32).sum(1) return array, valid_len train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8) for X, X_valid_len, Y, Y_valid_len in train_iter: print('X:', X.type(torch.int32)) print('Valid length of X:', X_valid_len) print('Y:', Y.type(torch.int32)) print('Valid length of Y:', Y_valid_len) break
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import os import paddle def build_array_nmt(lines, vocab, num_steps): lines = [vocab[l] for l in lines] lines = [l + [vocab['<eos>']] for l in lines] array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines]) valid_len = (array != vocab['<pad>']).astype(paddle.int32).sum(1) return array, valid_len train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8) for X, X_valid_len, Y, Y_valid_len in train_iter: print('X:', X.astype(paddle.int32)) print('Valid length of X:', X_valid_len) print('Y:', Y.astype(paddle.int32)) print('Valid length of Y:', Y_valid_len) break
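Both machine-translation cells rely on truncate_pad and load_data_nmt, which are not defined in the cell. load_data_nmt is too involved to reconstruct here, but a minimal sketch of the assumed truncate_pad helper is:

def truncate_pad(line, num_steps, padding_token):
    # Truncate a token list to num_steps, or pad it with padding_token up to num_steps.
    if len(line) > num_steps:
        return line[:num_steps]
    return line + [padding_token] * (num_steps - len(line))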
471
x = tf.range(12) tf.size(x) X = tf.reshape(x, (3, 4)) tf.zeros((2, 3, 4)) tf.ones((2, 3, 4)) tf.random.normal(shape=[3, 4]) tf.constant([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = tf.constant([1.0, 2, 4, 8]) y = tf.constant([2.0, 2, 2, 2]) x + y, x - y, x * y, x / y, x ** y tf.exp(x) X = tf.reshape(tf.range(12, dtype=tf.float32), (3, 4)) Y = tf.constant([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) tf.concat([X, Y], axis=0), tf.concat([X, Y], axis=1) tf.reduce_sum(X) a = tf.reshape(tf.range(3), (3, 1)) b = tf.reshape(tf.range(2), (1, 2)) X_var = tf.Variable(X) X_var[1, 2].assign(9) X_var = tf.Variable(X) X_var[0:2, :].assign(tf.ones(X_var[0:2,:].shape, dtype = tf.float32) * 12) Z = tf.Variable(tf.zeros_like(Y)) Z.assign(X + Y) @tf.function def computation(X, Y): Z = tf.zeros_like(Y) A = X + Y B = A + Y C = B + Y return C + Y computation(X, Y) A = X.numpy() B = tf.constant(A) a = tf.constant([3.5]).numpy() print(a, a.item(), float(a), int(a))
x = torch.arange(12) x.numel() X = x.reshape(3, 4) torch.zeros((2, 3, 4)) torch.ones((2, 3, 4)) torch.randn(3, 4) torch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = torch.tensor([1.0, 2, 4, 8]) y = torch.tensor([2, 2, 2, 2]) x + y, x - y, x * y, x / y, x ** y torch.exp(x) X = torch.arange(12, dtype=torch.float32).reshape((3,4)) Y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) torch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1) X.sum() a = torch.arange(3).reshape((3, 1)) b = torch.arange(2).reshape((1, 2)) X[1, 2] = 9 X[0:2, :] = 12 Z = torch.zeros_like(Y) Z[:] = X + Y before = id(X) X += Y id(X) == before A = X.numpy() B = torch.tensor(A) a = torch.tensor([3.5]) print(a, a.item(), float(a), int(a))
null
null
472
import tensorflow as tf X, y = tf.constant(inputs.values), tf.constant(outputs.values)
import torch X, y = torch.tensor(inputs.values), torch.tensor(outputs.values)
null
null
473
import tensorflow as tf x = tf.constant(3.0) y = tf.constant(2.0) print(x + y, x * y, x / y, x**y) x = tf.range(4) A = tf.reshape(tf.range(20), (5, 4)) tf.transpose(A) B = tf.constant([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) B == tf.transpose(B) X = tf.reshape(tf.range(24), (2, 3, 4)) A = tf.reshape(tf.range(20, dtype=tf.float32), (5, 4)) B = A print(A, A + B) a = 2 X = tf.reshape(tf.range(24), (2, 3, 4)) print(a + X, (a * X).shape) x = tf.range(4, dtype=tf.float32) print(x, tf.reduce_sum(x)) a = tf.reduce_sum(A) A_sum_axis0 = tf.reduce_sum(A, axis=0) A_sum_axis1 = tf.reduce_sum(A, axis=1) tf.reduce_sum(A, axis=[0, 1]) tf.reduce_mean(A) tf.reduce_sum(A) / tf.size(A).numpy() tf.reduce_mean(A, axis=0) tf.reduce_sum(A, axis=0) / A.shape[0] sum_A = tf.reduce_sum(A, axis=1, keepdims=True) tf.cumsum(A, axis=0) y = tf.ones(4, dtype=tf.float32) print(tf.tensordot(x, y, axes=1)) tf.reduce_sum(x * y) A.shape, x.shape, tf.linalg.matvec(A, x) B = tf.ones((4, 3), tf.float32) tf.matmul(A, B) u = tf.constant([3.0, -4.0]) tf.norm(u) tf.reduce_sum(tf.abs(u)) tf.norm(tf.ones((4, 9)))
import torch x = torch.tensor(3.0) y = torch.tensor(2.0) print(x + y, x * y, x / y, x**y) x = torch.arange(4) A = torch.arange(20).reshape(5, 4) A.T B = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) B == B.T X = torch.arange(24).reshape(2, 3, 4) A = torch.arange(20, dtype=torch.float32).reshape(5, 4) B = A.clone() print(A, A + B) a = 2 X = torch.arange(24).reshape(2, 3, 4) print(a + X, (a * X).shape) x = torch.arange(4, dtype=torch.float32) print(x, x.sum()) a = A.sum() A_sum_axis0 = A.sum(axis=0) A_sum_axis1 = A.sum(axis=1) A.sum(axis=[0, 1]) A.mean() A.sum() / A.numel() A.mean(axis=0) A.sum(axis=0) / A.shape[0] sum_A = A.sum(axis=1, keepdims=True) A.cumsum(axis=0) y = torch.ones(4, dtype = torch.float32) print(torch.dot(x, y)) torch.sum(x * y) A.shape, x.shape, torch.mv(A, x) B = torch.ones(4, 3) torch.mm(A, B) u = torch.tensor([3.0, -4.0]) torch.norm(u) torch.abs(u).sum() torch.norm(torch.ones((4, 9)))
null
null
474
%matplotlib inline import numpy as np from matplotlib_inline import backend_inline from d2l import tensorflow as d2l def f(x): return 3 * x ** 2 - 4 * x
%matplotlib inline import numpy as np from matplotlib_inline import backend_inline from d2l import torch as d2l def f(x): return 3 * x ** 2 - 4 * x
null
null
475
import tensorflow as tf x = tf.range(4, dtype=tf.float32) x = tf.Variable(x) with tf.GradientTape() as t: y = 2 * tf.tensordot(x, x, axes=1) x_grad = t.gradient(y, x) x_grad x_grad == 4 * x with tf.GradientTape() as t: y = tf.reduce_sum(x) t.gradient(y, x) with tf.GradientTape() as t: y = x * x t.gradient(y, x) with tf.GradientTape(persistent=True) as t: y = x * x u = tf.stop_gradient(y) z = u * x x_grad = t.gradient(z, x) x_grad == u t.gradient(y, x) == 2 * x def f(a): b = a * 2 while tf.norm(b) < 1000: b = b * 2 if tf.reduce_sum(b) > 0: c = b else: c = 100 * b return c a = tf.Variable(tf.random.normal(shape=())) with tf.GradientTape() as t: d = f(a) d_grad = t.gradient(d, a) d_grad d_grad == d / a
import torch x = torch.arange(4.0) x.requires_grad_(True) x.grad y = 2 * torch.dot(x, x) y.backward() x.grad x.grad == 4 * x x.grad.zero_() y = x.sum() y.backward() x.grad x.grad.zero_() y = x * x y.sum().backward() x.grad x.grad.zero_() y = x * x u = y.detach() z = u * x z.sum().backward() x.grad == u x.grad.zero_() y.sum().backward() x.grad == 2 * x def f(a): b = a * 2 while b.norm() < 1000: b = b * 2 if b.sum() > 0: c = b else: c = 100 * b return c a = torch.randn(size=(), requires_grad=True) d = f(a) d.backward() a.grad == d / a
null
null
476
%matplotlib inline import numpy as np import tensorflow as tf import tensorflow_probability as tfp from d2l import tensorflow as d2l fair_probs = tf.ones(6) / 6 tfp.distributions.Multinomial(1, fair_probs).sample() tfp.distributions.Multinomial(10, fair_probs).sample() counts = tfp.distributions.Multinomial(1000, fair_probs).sample()
%matplotlib inline import torch from torch.distributions import multinomial from d2l import torch as d2l fair_probs = torch.ones([6]) / 6 multinomial.Multinomial(1, fair_probs).sample() multinomial.Multinomial(10, fair_probs).sample() counts = multinomial.Multinomial(1000, fair_probs).sample()
null
null
477
counts = tfp.distributions.Multinomial(10, fair_probs).sample(500) cum_counts = tf.cumsum(counts, axis=0) estimates = cum_counts / tf.reduce_sum(cum_counts, axis=1, keepdims=True) d2l.set_figsize((6, 4.5)) for i in range(6): d2l.plt.plot(estimates[:, i].numpy(), label=("P(die=" + str(i + 1) + ")")) d2l.plt.axhline(y=0.167, color='black', linestyle='dashed') d2l.plt.gca().set_xlabel('Groups of experiments') d2l.plt.gca().set_ylabel('Estimated probability') d2l.plt.legend(); import tensorflow as tf a = dir(tf.random) help(tf.ones) tf.ones(4)
counts = multinomial.Multinomial(10, fair_probs).sample((500,)) cum_counts = counts.cumsum(dim=0) estimates = cum_counts / cum_counts.sum(dim=1, keepdims=True) d2l.set_figsize((6, 4.5)) for i in range(6): d2l.plt.plot(estimates[:, i].numpy(), label=("P(die=" + str(i + 1) + ")")) d2l.plt.axhline(y=0.167, color='black', linestyle='dashed') d2l.plt.gca().set_xlabel('Groups of experiments') d2l.plt.gca().set_ylabel('Estimated probability') d2l.plt.legend(); import torch a = dir(torch.distributions) help(torch.ones) torch.ones(4)
null
null
478
%matplotlib inline import math import time import numpy as np import tensorflow as tf from d2l import tensorflow as d2l n = 10000 a = tf.ones(n) b = tf.ones(n) c = tf.Variable(tf.zeros(n)) timer = d2l.Timer() for i in range(n): c[i].assign(a[i] + b[i])
%matplotlib inline import math import time import numpy as np import torch from d2l import torch as d2l n = 10000 a = torch.ones(n) b = torch.ones(n) c = torch.zeros(n) timer = d2l.Timer() for i in range(n): c[i] = a[i] + b[i]
null
null
479
%matplotlib inline import random import tensorflow as tf from d2l import tensorflow as d2l def synthetic_data(w, b, num_examples): X = tf.zeros((num_examples, w.shape[0])) X += tf.random.normal(shape=X.shape) y = tf.matmul(X, tf.reshape(w, (-1, 1))) + b y += tf.random.normal(shape=y.shape, stddev=0.01) y = tf.reshape(y, (-1, 1)) return X, y true_w = tf.constant([2, -3.4]) true_b = 4.2 features, labels = synthetic_data(true_w, true_b, 1000) d2l.set_figsize() d2l.plt.scatter(features[:, (1)].numpy(), labels.numpy(), 1); def data_iter(batch_size, features, labels): num_examples = len(features) indices = list(range(num_examples)) random.shuffle(indices) for i in range(0, num_examples, batch_size): j = tf.constant(indices[i: min(i + batch_size, num_examples)]) yield tf.gather(features, j), tf.gather(labels, j) batch_size = 10 w = tf.Variable(tf.random.normal(shape=(2, 1), mean=0, stddev=0.01), trainable=True) b = tf.Variable(tf.zeros(1), trainable=True) def linreg(X, w, b): return tf.matmul(X, w) + b def squared_loss(y_hat, y): return (y_hat - tf.reshape(y, y_hat.shape)) ** 2 / 2 def sgd(params, grads, lr, batch_size): for param, grad in zip(params, grads): param.assign_sub(lr*grad/batch_size) lr = 0.03 num_epochs = 3 net = linreg loss = squared_loss for epoch in range(num_epochs): for X, y in data_iter(batch_size, features, labels): with tf.GradientTape() as g: l = loss(net(X, w, b), y) dw, db = g.gradient(l, [w, b]) sgd([w, b], [dw, db], lr, batch_size) train_l = loss(net(features, w, b), labels)
%matplotlib inline import random import torch from d2l import torch as d2l def synthetic_data(w, b, num_examples): X = torch.normal(0, 1, (num_examples, len(w))) y = torch.matmul(X, w) + b y += torch.normal(0, 0.01, y.shape) return X, y.reshape((-1, 1)) true_w = torch.tensor([2, -3.4]) true_b = 4.2 features, labels = synthetic_data(true_w, true_b, 1000) d2l.set_figsize() d2l.plt.scatter(features[:, (1)].detach().numpy(), labels.detach().numpy(), 1); def data_iter(batch_size, features, labels): num_examples = len(features) indices = list(range(num_examples)) random.shuffle(indices) for i in range(0, num_examples, batch_size): batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)]) yield features[batch_indices], labels[batch_indices] batch_size = 10 w = torch.normal(0, 0.01, size=(2,1), requires_grad=True) b = torch.zeros(1, requires_grad=True) def linreg(X, w, b): return torch.matmul(X, w) + b def squared_loss(y_hat, y): return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2 def sgd(params, lr, batch_size): with torch.no_grad(): for param in params: param -= lr * param.grad / batch_size param.grad.zero_() lr = 0.03 num_epochs = 3 net = linreg loss = squared_loss for epoch in range(num_epochs): for X, y in data_iter(batch_size, features, labels): l = loss(net(X, w, b), y) l.sum().backward() sgd([w, b], lr, batch_size) with torch.no_grad(): train_l = loss(net(features, w, b), labels)
null
null
480
import numpy as np import tensorflow as tf from d2l import tensorflow as d2l true_w = tf.constant([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) def load_array(data_arrays, batch_size, is_train=True): dataset = tf.data.Dataset.from_tensor_slices(data_arrays) if is_train: dataset = dataset.shuffle(buffer_size=1000) dataset = dataset.batch(batch_size) return dataset batch_size = 10 data_iter = load_array((features, labels), batch_size) net = tf.keras.Sequential() net.add(tf.keras.layers.Dense(1)) initializer = tf.initializers.RandomNormal(stddev=0.01) net = tf.keras.Sequential() net.add(tf.keras.layers.Dense(1, kernel_initializer=initializer)) loss = tf.keras.losses.MeanSquaredError() trainer = tf.keras.optimizers.SGD(learning_rate=0.03) w = net.get_weights()[0] b = net.get_weights()[1]
import numpy as np import torch from torch.utils import data from d2l import torch as d2l true_w = torch.tensor([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) def load_array(data_arrays, batch_size, is_train=True): dataset = data.TensorDataset(*data_arrays) return data.DataLoader(dataset, batch_size, shuffle=is_train) batch_size = 10 data_iter = load_array((features, labels), batch_size) from torch import nn net = nn.Sequential(nn.Linear(2, 1)) net[0].weight.data.normal_(0, 0.01) net[0].bias.data.fill_(0) loss = nn.MSELoss() trainer = torch.optim.SGD(net.parameters(), lr=0.03) w = net[0].weight.data b = net[0].bias.data
null
null
481
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l d2l.use_svg_display() mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data() len(mnist_train[0]), len(mnist_test[0]) def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): figsize = (num_cols * scale, num_rows * scale) _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize) axes = axes.flatten() for i, (ax, img) in enumerate(zip(axes, imgs)): ax.imshow(img.numpy()) ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) if titles: ax.set_title(titles[i]) return axes X = tf.constant(mnist_train[0][:18]) y = tf.constant(mnist_train[1][:18]) show_images(X, 2, 9, titles=get_fashion_mnist_labels(y)); batch_size = 256 train_iter = tf.data.Dataset.from_tensor_slices(mnist_train).batch(batch_size).shuffle(len(mnist_train[0])) def load_data_fashion_mnist(batch_size, resize=None): mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data() process = lambda X, y: (tf.expand_dims(X, axis=3) / 255, tf.cast(y, dtype='int32')) resize_fn = lambda X, y: (tf.image.resize_with_pad(X, resize, resize) if resize else X, y) return (tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(batch_size).shuffle(len(mnist_train[0])).map(resize_fn), tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(batch_size).map(resize_fn))
%matplotlib inline import torch import torchvision from torch.utils import data from torchvision import transforms from d2l import torch as d2l d2l.use_svg_display() trans = transforms.ToTensor() mnist_train = torchvision.datasets.FashionMNIST( root="../data", train=True, transform=trans, download=True) mnist_test = torchvision.datasets.FashionMNIST( root="../data", train=False, transform=trans, download=True) len(mnist_train), len(mnist_test) def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): figsize = (num_cols * scale, num_rows * scale) _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize) axes = axes.flatten() for i, (ax, img) in enumerate(zip(axes, imgs)): if torch.is_tensor(img): ax.imshow(img.numpy()) else: ax.imshow(img) ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) if titles: ax.set_title(titles[i]) return axes X, y = next(iter(data.DataLoader(mnist_train, batch_size=18))) show_images(X.reshape(18, 28, 28), 2, 9, titles=get_fashion_mnist_labels(y)); batch_size = 256 def get_dataloader_workers(): return 4 train_iter = data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()) def load_data_fashion_mnist(batch_size, resize=None): trans = [transforms.ToTensor()] if resize: trans.insert(0, transforms.Resize(resize)) trans = transforms.Compose(trans) mnist_train = torchvision.datasets.FashionMNIST(root="../data", train=True, transform=trans, download=True) mnist_test = torchvision.datasets.FashionMNIST(root="../data", train=False, transform=trans, download=True) return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()), data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))
null
null
482
import tensorflow as tf from IPython import display from d2l import tensorflow as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs), mean=0, stddev=0.01)) b = tf.Variable(tf.zeros(num_outputs)) X = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) tf.reduce_sum(X, 0, keepdims=True), tf.reduce_sum(X, 1, keepdims=True) def softmax(X): X_exp = tf.exp(X) partition = tf.reduce_sum(X_exp, 1, keepdims=True) return X_exp / partition X = tf.random.normal((2, 5), 0, 1) X_prob = softmax(X) X_prob, tf.reduce_sum(X_prob, 1) def net(X): return softmax(tf.matmul(tf.reshape(X, (-1, W.shape[0])), W) + b) y_hat = tf.constant([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]]) y = tf.constant([0, 2]) tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1])) def cross_entropy(y_hat, y): return -tf.math.log(tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1]))) cross_entropy(y_hat, y) def accuracy(y_hat, y): if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: y_hat = tf.argmax(y_hat, axis=1) cmp = tf.cast(y_hat, y.dtype) == y return float(tf.reduce_sum(tf.cast(cmp, y.dtype))) def evaluate_accuracy(net, data_iter): metric = Accumulator(2) for X, y in data_iter: metric.add(accuracy(net(X), y), d2l.size(y)) return metric[0] / metric[1] def train_epoch_ch3(net, train_iter, loss, updater): metric = Accumulator(3) for X, y in train_iter: with tf.GradientTape() as tape: y_hat = net(X) if isinstance(loss, tf.keras.losses.Loss): l = loss(y, y_hat) else: l = loss(y_hat, y) if isinstance(updater, tf.keras.optimizers.Optimizer): params = net.trainable_variables grads = tape.gradient(l, params) updater.apply_gradients(zip(grads, params)) else: updater(X.shape[0], tape.gradient(l, updater.params)) l_sum = l * float(tf.size(y)) if isinstance(loss, tf.keras.losses.Loss) else tf.reduce_sum(l) metric.add(l_sum, accuracy(y_hat, y), tf.size(y)) return metric[0] / metric[2], metric[1] / metric[2] class Updater(): def __init__(self, params, lr): self.params = params self.lr = lr def __call__(self, batch_size, grads): d2l.sgd(self.params, grads, self.lr, batch_size) updater = Updater([W, b], lr=0.1) def predict_ch3(net, test_iter, n=6): for X, y in test_iter: break trues = d2l.get_fashion_mnist_labels(y) preds = d2l.get_fashion_mnist_labels(tf.argmax(net(X), axis=1)) titles = [true +'\n' + pred for true, pred in zip(trues, preds)] d2l.show_images(tf.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n]) predict_ch3(net, test_iter)
import torch from IPython import display from d2l import torch as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True) b = torch.zeros(num_outputs, requires_grad=True) X = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) X.sum(0, keepdim=True), X.sum(1, keepdim=True) def softmax(X): X_exp = torch.exp(X) partition = X_exp.sum(1, keepdim=True) return X_exp / partition X = torch.normal(0, 1, (2, 5)) X_prob = softmax(X) X_prob, X_prob.sum(1) def net(X): return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b) y = torch.tensor([0, 2]) y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]]) y_hat[[0, 1], y] def cross_entropy(y_hat, y): return - torch.log(y_hat[range(len(y_hat)), y]) cross_entropy(y_hat, y) def accuracy(y_hat, y): if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: y_hat = y_hat.argmax(axis=1) cmp = y_hat.type(y.dtype) == y return float(cmp.type(y.dtype).sum()) def evaluate_accuracy(net, data_iter): if isinstance(net, torch.nn.Module): net.eval() metric = Accumulator(2) with torch.no_grad(): for X, y in data_iter: metric.add(accuracy(net(X), y), y.numel()) return metric[0] / metric[1] def train_epoch_ch3(net, train_iter, loss, updater): if isinstance(net, torch.nn.Module): net.train() metric = Accumulator(3) for X, y in train_iter: y_hat = net(X) l = loss(y_hat, y) if isinstance(updater, torch.optim.Optimizer): updater.zero_grad() l.mean().backward() updater.step() else: l.sum().backward() updater(X.shape[0]) metric.add(float(l.sum()), accuracy(y_hat, y), y.numel()) return metric[0] / metric[2], metric[1] / metric[2] lr = 0.1 def updater(batch_size): return d2l.sgd([W, b], lr, batch_size) def predict_ch3(net, test_iter, n=6): for X, y in test_iter: break trues = d2l.get_fashion_mnist_labels(y) preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1)) titles = [true +'\n' + pred for true, pred in zip(trues, preds)] d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n]) predict_ch3(net, test_iter)
null
null
483
import tensorflow as tf from d2l import tensorflow as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = tf.keras.models.Sequential() net.add(tf.keras.layers.Flatten(input_shape=(28, 28))) weight_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01) net.add(tf.keras.layers.Dense(10, kernel_initializer=weight_initializer)) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) trainer = tf.keras.optimizers.SGD(learning_rate=.1)
import torch from torch import nn from d2l import torch as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10)) def init_weights(m): if type(m) == nn.Linear: nn.init.normal_(m.weight, std=0.01) net.apply(init_weights); loss = nn.CrossEntropyLoss(reduction='none') trainer = torch.optim.SGD(net.parameters(), lr=0.1)
null
null
484
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l x = tf.Variable(tf.range(-8.0, 8.0, 0.1), dtype=tf.float32) y = tf.nn.relu(x) d2l.plot(x.numpy(), y.numpy(), 'x', 'relu(x)', figsize=(5, 2.5)) with tf.GradientTape() as t: y = tf.nn.relu(x) d2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of relu', figsize=(5, 2.5)) y = tf.nn.sigmoid(x) d2l.plot(x.numpy(), y.numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5)) with tf.GradientTape() as t: y = tf.nn.sigmoid(x) d2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5)) y = tf.nn.tanh(x) d2l.plot(x.numpy(), y.numpy(), 'x', 'tanh(x)', figsize=(5, 2.5)) with tf.GradientTape() as t: y = tf.nn.tanh(x) d2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))
%matplotlib inline import torch from d2l import torch as d2l x = torch.arange(-8.0, 8.0, 0.1, requires_grad=True) y = torch.relu(x) d2l.plot(x.detach(), y.detach(), 'x', 'relu(x)', figsize=(5, 2.5)) y.backward(torch.ones_like(x), retain_graph=True) d2l.plot(x.detach(), x.grad, 'x', 'grad of relu', figsize=(5, 2.5)) y = torch.sigmoid(x) d2l.plot(x.detach(), y.detach(), 'x', 'sigmoid(x)', figsize=(5, 2.5)) x.grad.data.zero_() y.backward(torch.ones_like(x),retain_graph=True) d2l.plot(x.detach(), x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5)) y = torch.tanh(x) d2l.plot(x.detach(), y.detach(), 'x', 'tanh(x)', figsize=(5, 2.5)) x.grad.data.zero_() y.backward(torch.ones_like(x),retain_graph=True) d2l.plot(x.detach(), x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))
null
null
485
import tensorflow as tf from d2l import tensorflow as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs, num_outputs, num_hiddens = 784, 10, 256 W1 = tf.Variable(tf.random.normal(shape=(num_inputs, num_hiddens), mean=0, stddev=0.01)) b1 = tf.Variable(tf.zeros(num_hiddens)) W2 = tf.Variable(tf.random.normal(shape=(num_hiddens, num_outputs), mean=0, stddev=0.01)) b2 = tf.Variable(tf.zeros(num_outputs)) params = [W1, b1, W2, b2] def relu(X): return tf.math.maximum(X, 0) def net(X): X = tf.reshape(X, (-1, num_inputs)) H = relu(tf.matmul(X, W1) + b1) return tf.matmul(H, W2) + b2 def loss(y_hat, y): return tf.losses.sparse_categorical_crossentropy(y, y_hat, from_logits=True) num_epochs, lr = 10, 0.1 updater = d2l.Updater([W1, W2, b1, b2], lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
import torch from torch import nn from d2l import torch as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs, num_outputs, num_hiddens = 784, 10, 256 W1 = nn.Parameter(torch.randn( num_inputs, num_hiddens, requires_grad=True) * 0.01) b1 = nn.Parameter(torch.zeros(num_hiddens, requires_grad=True)) W2 = nn.Parameter(torch.randn( num_hiddens, num_outputs, requires_grad=True) * 0.01) b2 = nn.Parameter(torch.zeros(num_outputs, requires_grad=True)) params = [W1, b1, W2, b2] def relu(X): a = torch.zeros_like(X) return torch.max(X, a) def net(X): X = X.reshape((-1, num_inputs)) H = relu(X@W1 + b1) return (H@W2 + b2) loss = nn.CrossEntropyLoss(reduction='none') num_epochs, lr = 10, 0.1 updater = torch.optim.SGD(params, lr=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
null
null
486
import tensorflow as tf from d2l import tensorflow as d2l net = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(10)]) batch_size, lr, num_epochs = 256, 0.1, 10 loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) trainer = tf.keras.optimizers.SGD(learning_rate=lr) train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
import torch from torch import nn from d2l import torch as d2l net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10)) def init_weights(m): if type(m) == nn.Linear: nn.init.normal_(m.weight, std=0.01) net.apply(init_weights); batch_size, lr, num_epochs = 256, 0.1, 10 loss = nn.CrossEntropyLoss(reduction='none') trainer = torch.optim.SGD(net.parameters(), lr=lr) train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
null
null
487
import math import numpy as np import tensorflow as tf from d2l import tensorflow as d2l true_w, features, poly_features, labels = [tf.constant(x, dtype=tf.float32) for x in [true_w, features, poly_features, labels]] features[:2], poly_features[:2, :], labels[:2] def evaluate_loss(net, data_iter, loss): metric = d2l.Accumulator(2) for X, y in data_iter: l = loss(net(X), y) metric.add(tf.reduce_sum(l), d2l.size(l)) return metric[0] / metric[1] def train(train_features, test_features, train_labels, test_labels, num_epochs=400): loss = tf.losses.MeanSquaredError() input_shape = train_features.shape[-1] net = tf.keras.Sequential() net.add(tf.keras.layers.Dense(1, use_bias=False)) batch_size = min(10, train_labels.shape[0]) train_iter = d2l.load_array((train_features, train_labels), batch_size) test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False) trainer = tf.keras.optimizers.SGD(learning_rate=.01) animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test']) for epoch in range(num_epochs): d2l.train_epoch_ch3(net, train_iter, loss, trainer) if epoch == 0 or (epoch + 1) % 20 == 0: animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))
import math import numpy as np import torch from torch import nn from d2l import torch as d2l true_w, features, poly_features, labels = [torch.tensor(x, dtype=torch.float32) for x in [true_w, features, poly_features, labels]] features[:2], poly_features[:2, :], labels[:2] def evaluate_loss(net, data_iter, loss): metric = d2l.Accumulator(2) for X, y in data_iter: out = net(X) y = y.reshape(out.shape) l = loss(out, y) metric.add(l.sum(), l.numel()) return metric[0] / metric[1] def train(train_features, test_features, train_labels, test_labels, num_epochs=400): loss = nn.MSELoss(reduction='none') input_shape = train_features.shape[-1] net = nn.Sequential(nn.Linear(input_shape, 1, bias=False)) batch_size = min(10, train_labels.shape[0]) train_iter = d2l.load_array((train_features, train_labels.reshape(-1,1)), batch_size) test_iter = d2l.load_array((test_features, test_labels.reshape(-1,1)), batch_size, is_train=False) trainer = torch.optim.SGD(net.parameters(), lr=0.01) animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test']) for epoch in range(num_epochs): d2l.train_epoch_ch3(net, train_iter, loss, trainer) if epoch == 0 or (epoch + 1) % 20 == 0: animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))
null
null
488
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5 true_w, true_b = tf.ones((num_inputs, 1)) * 0.01, 0.05 train_data = d2l.synthetic_data(true_w, true_b, n_train) train_iter = d2l.load_array(train_data, batch_size) test_data = d2l.synthetic_data(true_w, true_b, n_test) test_iter = d2l.load_array(test_data, batch_size, is_train=False) def init_params(): w = tf.Variable(tf.random.normal(mean=1, shape=(num_inputs, 1))) b = tf.Variable(tf.zeros(shape=(1, ))) return [w, b] def l2_penalty(w): return tf.reduce_sum(tf.pow(w, 2)) / 2 def train(lambd): w, b = init_params() net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss num_epochs, lr = 100, 0.003 animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: with tf.GradientTape() as tape: l = loss(net(X), y) + lambd * l2_penalty(w) grads = tape.gradient(l, [w, b]) d2l.sgd([w, b], grads, lr, batch_size) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) def train_concise(wd): net = tf.keras.models.Sequential() net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(wd))) net.build(input_shape=(1, num_inputs)) w, b = net.trainable_variables loss = tf.keras.losses.MeanSquaredError() num_epochs, lr = 100, 0.003 trainer = tf.keras.optimizers.SGD(learning_rate=lr) animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: with tf.GradientTape() as tape: l = loss(net(X), y) + net.losses grads = tape.gradient(l, net.trainable_variables) trainer.apply_gradients(zip(grads, net.trainable_variables)) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))
%matplotlib inline import torch from torch import nn from d2l import torch as d2l n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5 true_w, true_b = torch.ones((num_inputs, 1)) * 0.01, 0.05 train_data = d2l.synthetic_data(true_w, true_b, n_train) train_iter = d2l.load_array(train_data, batch_size) test_data = d2l.synthetic_data(true_w, true_b, n_test) test_iter = d2l.load_array(test_data, batch_size, is_train=False) def init_params(): w = torch.normal(0, 1, size=(num_inputs, 1), requires_grad=True) b = torch.zeros(1, requires_grad=True) return [w, b] def l2_penalty(w): return torch.sum(w.pow(2)) / 2 def train(lambd): w, b = init_params() net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss num_epochs, lr = 100, 0.003 animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: l = loss(net(X), y) + lambd * l2_penalty(w) l.sum().backward() d2l.sgd([w, b], lr, batch_size) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) def train_concise(wd): net = nn.Sequential(nn.Linear(num_inputs, 1)) for param in net.parameters(): param.data.normal_() loss = nn.MSELoss(reduction='none') num_epochs, lr = 100, 0.003 trainer = torch.optim.SGD([{"params":net[0].weight,'weight_decay': wd}, {"params":net[0].bias}], lr=lr) animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: trainer.zero_grad() l = loss(net(X), y) l.mean().backward() trainer.step() if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))
null
null
489
import tensorflow as tf from d2l import tensorflow as d2l def dropout_layer(X, dropout): assert 0 <= dropout <= 1 if dropout == 1: return tf.zeros_like(X) if dropout == 0: return X mask = tf.random.uniform(shape=tf.shape(X), minval=0, maxval=1) < 1 - dropout return tf.cast(mask, dtype=tf.float32) * X / (1.0 - dropout) X = tf.reshape(tf.range(16, dtype=tf.float32), (2, 8)) num_outputs, num_hiddens1, num_hiddens2 = 10, 256, 256 dropout1, dropout2 = 0.2, 0.5 class Net(tf.keras.Model): def __init__(self, num_outputs, num_hiddens1, num_hiddens2): super().__init__() self.input_layer = tf.keras.layers.Flatten() self.hidden1 = tf.keras.layers.Dense(num_hiddens1, activation='relu') self.hidden2 = tf.keras.layers.Dense(num_hiddens2, activation='relu') self.output_layer = tf.keras.layers.Dense(num_outputs) def call(self, inputs, training=None): x = self.input_layer(inputs) x = self.hidden1(x) if training: x = dropout_layer(x, dropout1) x = self.hidden2(x) if training: x = dropout_layer(x, dropout2) x = self.output_layer(x) return x net = Net(num_outputs, num_hiddens1, num_hiddens2) num_epochs, lr, batch_size = 10, 0.5, 256 loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) trainer = tf.keras.optimizers.SGD(learning_rate=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation=tf.nn.relu), tf.keras.layers.Dropout(dropout1), tf.keras.layers.Dense(256, activation=tf.nn.relu), tf.keras.layers.Dropout(dropout2), tf.keras.layers.Dense(10), ]) trainer = tf.keras.optimizers.SGD(learning_rate=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
import torch from torch import nn from d2l import torch as d2l def dropout_layer(X, dropout): assert 0 <= dropout <= 1 if dropout == 1: return torch.zeros_like(X) if dropout == 0: return X mask = (torch.rand(X.shape) > dropout).float() return mask * X / (1.0 - dropout) X= torch.arange(16, dtype = torch.float32).reshape((2, 8)) num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256 dropout1, dropout2 = 0.2, 0.5 class Net(nn.Module): def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2, is_training = True): super(Net, self).__init__() self.num_inputs = num_inputs self.training = is_training self.lin1 = nn.Linear(num_inputs, num_hiddens1) self.lin2 = nn.Linear(num_hiddens1, num_hiddens2) self.lin3 = nn.Linear(num_hiddens2, num_outputs) self.relu = nn.ReLU() def forward(self, X): H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs)))) if self.training == True: H1 = dropout_layer(H1, dropout1) H2 = self.relu(self.lin2(H1)) if self.training == True: H2 = dropout_layer(H2, dropout2) out = self.lin3(H2) return out net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2) num_epochs, lr, batch_size = 10, 0.5, 256 loss = nn.CrossEntropyLoss(reduction='none') train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) trainer = torch.optim.SGD(net.parameters(), lr=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Dropout(dropout1), nn.Linear(256, 256), nn.ReLU(), nn.Dropout(dropout2), nn.Linear(256, 10)) def init_weights(m): if type(m) == nn.Linear: nn.init.normal_(m.weight, std=0.01) net.apply(init_weights); trainer = torch.optim.SGD(net.parameters(), lr=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
null
null
490
trainer = tf.keras.optimizers.SGD(learning_rate=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) %matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l x = tf.Variable(tf.range(-8.0, 8.0, 0.1)) with tf.GradientTape() as t: y = tf.nn.sigmoid(x) d2l.plot(x.numpy(), [y.numpy(), t.gradient(y, x).numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5)) M = tf.random.normal((4, 4)) for i in range(100): M = tf.matmul(M, tf.random.normal((4, 4)))
trainer = torch.optim.SGD(net.parameters(), lr=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) %matplotlib inline import torch from d2l import torch as d2l x = torch.arange(-8.0, 8.0, 0.1, requires_grad=True) y = torch.sigmoid(x) y.backward(torch.ones_like(x)) d2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5)) M = torch.normal(0, 1, size=(4,4)) for i in range(100): M = torch.mm(M,torch.normal(0, 1, size=(4, 4)))
null
null
491
%matplotlib inline import numpy as np import pandas as pd import tensorflow as tf from d2l import tensorflow as d2l n_train = train_data.shape[0] train_features = tf.constant(all_features[:n_train].values, dtype=tf.float32) test_features = tf.constant(all_features[n_train:].values, dtype=tf.float32) train_labels = tf.constant(train_data.SalePrice.values.reshape(-1, 1), dtype=tf.float32) loss = tf.keras.losses.MeanSquaredError() def get_net(): net = tf.keras.models.Sequential() net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(weight_decay))) return net def log_rmse(y_true, y_pred): clipped_preds = tf.clip_by_value(y_pred, 1, float('inf')) return tf.sqrt(tf.reduce_mean(loss(tf.math.log(y_true), tf.math.log(clipped_preds)))) def train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size): train_ls, test_ls = [], [] train_iter = d2l.load_array((train_features, train_labels), batch_size) optimizer = tf.keras.optimizers.Adam(learning_rate) net.compile(loss=loss, optimizer=optimizer) for epoch in range(num_epochs): for X, y in train_iter: with tf.GradientTape() as tape: y_hat = net(X) l = loss(y, y_hat) params = net.trainable_variables grads = tape.gradient(l, params) optimizer.apply_gradients(zip(grads, params)) train_ls.append(log_rmse(train_labels, net(train_features))) if test_labels is not None: test_ls.append(log_rmse(test_labels, net(test_features))) return train_ls, test_ls def get_k_fold_data(k, i, X, y): assert k > 1 fold_size = X.shape[0] // k X_train, y_train = None, None for j in range(k): idx = slice(j * fold_size, (j + 1) * fold_size) X_part, y_part = X[idx, :], y[idx] if j == i: X_valid, y_valid = X_part, y_part elif X_train is None: X_train, y_train = X_part, y_part else: X_train = tf.concat([X_train, X_part], 0) y_train = tf.concat([y_train, y_part], 0) return X_train, y_train, X_valid, y_valid def train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size): net = get_net() train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size) d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log') preds = net(test_features).numpy() test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0]) submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1) submission.to_csv('submission.csv', index=False)
%matplotlib inline import numpy as np import pandas as pd import torch from torch import nn from d2l import torch as d2l n_train = train_data.shape[0] train_features = torch.tensor(all_features[:n_train].values, dtype=torch.float32) test_features = torch.tensor(all_features[n_train:].values, dtype=torch.float32) train_labels = torch.tensor(train_data.SalePrice.values.reshape(-1, 1), dtype=torch.float32) loss = nn.MSELoss() in_features = train_features.shape[1] def get_net(): net = nn.Sequential(nn.Linear(in_features,1)) return net def log_rmse(net, features, labels): clipped_preds = torch.clamp(net(features), 1, float('inf')) rmse = torch.sqrt(loss(torch.log(clipped_preds), torch.log(labels))) return rmse.item() def train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size): train_ls, test_ls = [], [] train_iter = d2l.load_array((train_features, train_labels), batch_size) optimizer = torch.optim.Adam(net.parameters(), lr = learning_rate, weight_decay = weight_decay) for epoch in range(num_epochs): for X, y in train_iter: optimizer.zero_grad() l = loss(net(X), y) l.backward() optimizer.step() train_ls.append(log_rmse(net, train_features, train_labels)) if test_labels is not None: test_ls.append(log_rmse(net, test_features, test_labels)) return train_ls, test_ls def get_k_fold_data(k, i, X, y): assert k > 1 fold_size = X.shape[0] // k X_train, y_train = None, None for j in range(k): idx = slice(j * fold_size, (j + 1) * fold_size) X_part, y_part = X[idx, :], y[idx] if j == i: X_valid, y_valid = X_part, y_part elif X_train is None: X_train, y_train = X_part, y_part else: X_train = torch.cat([X_train, X_part], 0) y_train = torch.cat([y_train, y_part], 0) return X_train, y_train, X_valid, y_valid def train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size): net = get_net() train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size) d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log') preds = net(test_features).detach().numpy() test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0]) submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1) submission.to_csv('submission.csv', index=False)
null
null
492
import tensorflow as tf net = tf.keras.models.Sequential([ tf.keras.layers.Dense(256, activation=tf.nn.relu), tf.keras.layers.Dense(10)]) X = tf.random.uniform((2, 20)) net(X) class MLP(tf.keras.Model): def __init__(self): super().__init__() self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu) self.out = tf.keras.layers.Dense(units=10) def call(self, X): return self.out(self.hidden((X))) class MySequential(tf.keras.Model): def __init__(self, *args): super().__init__() self.modules = [] for block in args: self.modules.append(block) def call(self, X): for module in self.modules: X = module(X) return X net = MySequential( tf.keras.layers.Dense(units=256, activation=tf.nn.relu), tf.keras.layers.Dense(10)) net(X) class FixedHiddenMLP(tf.keras.Model): def __init__(self): super().__init__() self.flatten = tf.keras.layers.Flatten() self.rand_weight = tf.constant(tf.random.uniform((20, 20))) self.dense = tf.keras.layers.Dense(20, activation=tf.nn.relu) def call(self, inputs): X = self.flatten(inputs) X = tf.nn.relu(tf.matmul(X, self.rand_weight) + 1) X = self.dense(X) while tf.reduce_sum(tf.math.abs(X)) > 1: X /= 2 return tf.reduce_sum(X) class NestMLP(tf.keras.Model): def __init__(self): super().__init__() self.net = tf.keras.Sequential() self.net.add(tf.keras.layers.Dense(64, activation=tf.nn.relu)) self.net.add(tf.keras.layers.Dense(32, activation=tf.nn.relu)) self.dense = tf.keras.layers.Dense(16, activation=tf.nn.relu) def call(self, inputs): return self.dense(self.net(inputs)) chimera = tf.keras.Sequential() chimera.add(NestMLP()) chimera.add(tf.keras.layers.Dense(20)) chimera.add(FixedHiddenMLP()) chimera(X)
import torch from torch import nn from torch.nn import functional as F net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10)) X = torch.rand(2, 20) net(X) class MLP(nn.Module): def __init__(self): super().__init__() self.hidden = nn.Linear(20, 256) self.out = nn.Linear(256, 10) def forward(self, X): return self.out(F.relu(self.hidden(X))) class MySequential(nn.Module): def __init__(self, *args): super().__init__() for idx, module in enumerate(args): self._modules[str(idx)] = module def forward(self, X): for block in self._modules.values(): X = block(X) return X net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10)) net(X) class FixedHiddenMLP(nn.Module): def __init__(self): super().__init__() self.rand_weight = torch.rand((20, 20), requires_grad=False) self.linear = nn.Linear(20, 20) def forward(self, X): X = self.linear(X) X = F.relu(torch.mm(X, self.rand_weight) + 1) X = self.linear(X) while X.abs().sum() > 1: X /= 2 return X.sum() class NestMLP(nn.Module): def __init__(self): super().__init__() self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU()) self.linear = nn.Linear(32, 16) def forward(self, X): return self.linear(self.net(X)) chimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP()) chimera(X)
null
null
493
import tensorflow as tf net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu), tf.keras.layers.Dense(1), ]) X = tf.random.uniform((2, 4)) net(X) net.get_weights()[1] def block1(name): return tf.keras.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu)], name=name) def block2(): net = tf.keras.Sequential() for i in range(4): net.add(block1(name=f'block-{i}')) return net rgnet = tf.keras.Sequential() rgnet.add(block2()) rgnet.add(tf.keras.layers.Dense(1)) rgnet(X) net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.01), bias_initializer=tf.zeros_initializer()), tf.keras.layers.Dense(1)]) net(X) net.weights[0], net.weights[1] net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.Constant(1), bias_initializer=tf.zeros_initializer()), tf.keras.layers.Dense(1), ]) net(X) net.weights[0], net.weights[1] net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.GlorotUniform()), tf.keras.layers.Dense(1, kernel_initializer=tf.keras.initializers.Constant(1)), ]) net(X) class MyInit(tf.keras.initializers.Initializer): def __call__(self, shape, dtype=None): data=tf.random.uniform(shape, -10, 10, dtype=dtype) factor=(tf.abs(data) >= 5) factor=tf.cast(factor, tf.float32) return data * factor net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=MyInit()), tf.keras.layers.Dense(1)]) net(X) net.layers[1].weights[0][:].assign(net.layers[1].weights[0] + 1) net.layers[1].weights[0][0, 0].assign(42) net.layers[1].weights[0] layer = CenteredLayer() layer(tf.constant([1, 2, 3, 4, 5])) net = tf.keras.Sequential([tf.keras.layers.Dense(128), CenteredLayer()])
import torch from torch import nn net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1)) X = torch.rand(size=(2, 4)) net(X) net.state_dict()['2.bias'].data def block1(): return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU()) def block2(): net = nn.Sequential() for i in range(4): net.add_module(f'block {i}', block1()) return net rgnet = nn.Sequential(block2(), nn.Linear(4, 1)) rgnet(X) def init_normal(m): if type(m) == nn.Linear: nn.init.normal_(m.weight, mean=0, std=0.01) nn.init.zeros_(m.bias) net.apply(init_normal) net[0].weight.data[0], net[0].bias.data[0] def init_constant(m): if type(m) == nn.Linear: nn.init.constant_(m.weight, 1) nn.init.zeros_(m.bias) net.apply(init_constant) net[0].weight.data[0], net[0].bias.data[0] def init_xavier(m): if type(m) == nn.Linear: nn.init.xavier_uniform_(m.weight) def init_42(m): if type(m) == nn.Linear: nn.init.constant_(m.weight, 42) net[0].apply(init_xavier) net[2].apply(init_42) def my_init(m): if type(m) == nn.Linear: nn.init.uniform_(m.weight, -10, 10) m.weight.data *= m.weight.data.abs() >= 5 net.apply(my_init) net[0].weight[:2] net[0].weight.data[:] += 1 net[0].weight.data[0, 0] = 42 net[0].weight.data[0] layer = CenteredLayer() layer(torch.FloatTensor([1, 2, 3, 4, 5])) net = nn.Sequential(nn.Linear(8, 128), CenteredLayer())
null
null
494
import tensorflow as tf class CenteredLayer(tf.keras.Model): def __init__(self): super().__init__() def call(self, inputs): return inputs - tf.reduce_mean(inputs) Y = net(tf.random.uniform((4, 8))) tf.reduce_mean(Y) class MyDense(tf.keras.Model): def __init__(self, units): super().__init__() self.units = units def build(self, X_shape): self.weight = self.add_weight(name='weight', shape=[X_shape[-1], self.units], initializer=tf.random_normal_initializer()) self.bias = self.add_weight( name='bias', shape=[self.units], initializer=tf.zeros_initializer()) def call(self, X): linear = tf.matmul(X, self.weight) + self.bias return tf.nn.relu(linear) dense = MyDense(3) dense(tf.random.uniform((2, 5))) dense.get_weights() dense(tf.random.uniform((2, 5))) net = tf.keras.models.Sequential([MyDense(8), MyDense(1)]) net(tf.random.uniform((2, 64)))
import torch import torch.nn.functional as F from torch import nn class CenteredLayer(nn.Module): def __init__(self): super().__init__() def forward(self, X): return X - X.mean() Y = net(torch.rand(4, 8)) Y.mean() class MyLinear(nn.Module): def __init__(self, in_units, units): super().__init__() self.weight = nn.Parameter(torch.randn(in_units, units)) self.bias = nn.Parameter(torch.randn(units,)) def forward(self, X): linear = torch.matmul(X, self.weight.data) + self.bias.data return F.relu(linear) linear = MyLinear(5, 3) linear.weight linear(torch.rand(2, 5)) net = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1)) net(torch.rand(2, 64))
null
null
495
import numpy as np import tensorflow as tf x = tf.range(4) np.save('x-file.npy', x) x2 = np.load('x-file.npy', allow_pickle=True) y = tf.zeros(4) np.save('xy-files.npy', [x, y]) x2, y2 = np.load('xy-files.npy', allow_pickle=True) mydict = {'x': x, 'y': y} np.save('mydict.npy', mydict) mydict2 = np.load('mydict.npy', allow_pickle=True) class MLP(tf.keras.Model): def __init__(self): super().__init__() self.flatten = tf.keras.layers.Flatten() self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu) self.out = tf.keras.layers.Dense(units=10) def call(self, inputs): x = self.flatten(inputs) x = self.hidden(x) return self.out(x) net = MLP() X = tf.random.uniform((2, 20)) Y = net(X) net.save_weights('mlp.params') clone = MLP() clone.load_weights('mlp.params')
import torch from torch import nn from torch.nn import functional as F x = torch.arange(4) torch.save(x, 'x-file') x2 = torch.load('x-file') y = torch.zeros(4) torch.save([x, y],'x-files') x2, y2 = torch.load('x-files') mydict = {'x': x, 'y': y} torch.save(mydict, 'mydict') mydict2 = torch.load('mydict') class MLP(nn.Module): def __init__(self): super().__init__() self.hidden = nn.Linear(20, 256) self.output = nn.Linear(256, 10) def forward(self, x): return self.output(F.relu(self.hidden(x))) net = MLP() X = torch.randn(size=(2, 20)) Y = net(X) torch.save(net.state_dict(), 'mlp.params') clone = MLP() clone.load_state_dict(torch.load('mlp.params')) clone.eval()
null
null
496
import tensorflow as tf tf.device('/CPU:0'), tf.device('/GPU:0'), tf.device('/GPU:1') len(tf.config.experimental.list_physical_devices('GPU')) def try_gpu(i=0): if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1: return tf.device(f'/GPU:{i}') return tf.device('/CPU:0') def try_all_gpus(): num_gpus = len(tf.config.experimental.list_physical_devices('GPU')) devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)] return devices if devices else [tf.device('/CPU:0')] try_gpu(), try_gpu(10), try_all_gpus() x = tf.constant([1, 2, 3]) x.device with try_gpu(): X = tf.ones((2, 3)) with try_gpu(1): Y = tf.random.uniform((2, 3)) with try_gpu(1): Z = X with try_gpu(1): Z2 = Z Z2 is Z strategy = tf.distribute.MirroredStrategy() with strategy.scope(): net = tf.keras.models.Sequential([ tf.keras.layers.Dense(1)]) net.layers[0].weights[0].device, net.layers[0].weights[1].device
import torch from torch import nn torch.device('cpu'), torch.device('cuda'), torch.device('cuda:1') torch.cuda.device_count() def try_gpu(i=0): if torch.cuda.device_count() >= i + 1: return torch.device(f'cuda:{i}') return torch.device('cpu') def try_all_gpus(): devices = [torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())] return devices if devices else [torch.device('cpu')] try_gpu(), try_gpu(10), try_all_gpus() x = torch.tensor([1, 2, 3]) x.device X = torch.ones(2, 3, device=try_gpu()) Y = torch.rand(2, 3, device=try_gpu(1)) Z = X.cuda(1) Z.cuda(1) is Z net = nn.Sequential(nn.Linear(3, 1)) net = net.to(device=try_gpu()) net[0].weight.data.device
null
null
497
import tensorflow as tf from d2l import tensorflow as d2l def corr2d(X, K): h, w = K.shape Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))) for i in range(Y.shape[0]): for j in range(Y.shape[1]): Y[i, j].assign(tf.reduce_sum( X[i: i + h, j: j + w] * K)) return Y X = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) K = tf.constant([[0.0, 1.0], [2.0, 3.0]]) corr2d(X, K) class Conv2D(tf.keras.layers.Layer): def __init__(self): super().__init__() def build(self, kernel_size): initializer = tf.random_normal_initializer() self.weight = self.add_weight(name='w', shape=kernel_size, initializer=initializer) self.bias = self.add_weight(name='b', shape=(1, ), initializer=initializer) def call(self, inputs): return corr2d(inputs, self.weight) + self.bias X = tf.Variable(tf.ones((6, 8))) X[:, 2:6].assign(tf.zeros(X[:, 2:6].shape)) K = tf.constant([[1.0, -1.0]]) Y = corr2d(X, K) corr2d(tf.transpose(X), K) conv2d = tf.keras.layers.Conv2D(1, (1, 2), use_bias=False) X = tf.reshape(X, (1, 6, 8, 1)) Y = tf.reshape(Y, (1, 6, 7, 1)) lr = 3e-2 Y_hat = conv2d(X) for i in range(10): with tf.GradientTape(watch_accessed_variables=False) as g: g.watch(conv2d.weights[0]) Y_hat = conv2d(X) l = (abs(Y_hat - Y)) ** 2 update = tf.multiply(lr, g.gradient(l, conv2d.weights[0])) weights = conv2d.get_weights() weights[0] = conv2d.weights[0] - update conv2d.set_weights(weights) tf.reshape(conv2d.get_weights()[0], (1, 2))
import torch from torch import nn from d2l import torch as d2l def corr2d(X, K): h, w = K.shape Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): Y[i, j] = (X[i:i + h, j:j + w] * K).sum() return Y X = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) K = torch.tensor([[0.0, 1.0], [2.0, 3.0]]) corr2d(X, K) class Conv2D(nn.Module): def __init__(self, kernel_size): super().__init__() self.weight = nn.Parameter(torch.rand(kernel_size)) self.bias = nn.Parameter(torch.zeros(1)) def forward(self, x): return corr2d(x, self.weight) + self.bias X = torch.ones((6, 8)) X[:, 2:6] = 0 K = torch.tensor([[1.0, -1.0]]) Y = corr2d(X, K) corr2d(X.t(), K) conv2d = nn.Conv2d(1,1, kernel_size=(1, 2), bias=False) X = X.reshape((1, 1, 6, 8)) Y = Y.reshape((1, 1, 6, 7)) lr = 3e-2 for i in range(10): Y_hat = conv2d(X) l = (Y_hat - Y) ** 2 conv2d.zero_grad() l.sum().backward() conv2d.weight.data[:] -= lr * conv2d.weight.grad conv2d.weight.data.reshape((1, 2))
null
null
498
import tensorflow as tf def comp_conv2d(conv2d, X): X = tf.reshape(X, (1, ) + X.shape + (1, )) Y = conv2d(X) return tf.reshape(Y, Y.shape[1:3]) conv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same') X = tf.random.uniform(shape=(8, 8)) comp_conv2d(conv2d, X).shape conv2d = tf.keras.layers.Conv2D(1, kernel_size=(5, 3), padding='same') comp_conv2d(conv2d, X).shape conv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same', strides=2) comp_conv2d(conv2d, X).shape conv2d = tf.keras.layers.Conv2D(1, kernel_size=(3,5), padding='valid', strides=(3, 4)) comp_conv2d(conv2d, X).shape
import torch from torch import nn def comp_conv2d(conv2d, X): X = X.reshape((1, 1) + X.shape) Y = conv2d(X) return Y.reshape(Y.shape[2:]) conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1) X = torch.rand(size=(8, 8)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2d(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4)) comp_conv2d(conv2d, X).shape
null
null
499
import tensorflow as tf from d2l import tensorflow as d2l def corr2d_multi_in(X, K): return tf.reduce_sum([d2l.corr2d(x, k) for x, k in zip(X, K)], axis=0) X = tf.constant([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]) K = tf.constant([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]]) corr2d_multi_in(X, K) def corr2d_multi_in_out(X, K): return tf.stack([corr2d_multi_in(X, k) for k in K], 0) K = tf.stack((K, K + 1, K + 2), 0) K.shape def corr2d_multi_in_out_1x1(X, K): c_i, h, w = X.shape c_o = K.shape[0] X = tf.reshape(X, (c_i, h * w)) K = tf.reshape(K, (c_o, c_i)) Y = tf.matmul(K, X) return tf.reshape(Y, (c_o, h, w)) X = tf.random.normal((3, 3, 3), 0, 1) K = tf.random.normal((2, 3, 1, 1), 0, 1) Y1 = corr2d_multi_in_out_1x1(X, K) Y2 = corr2d_multi_in_out(X, K) assert float(tf.reduce_sum(tf.abs(Y1 - Y2))) < 1e-6
import torch from d2l import torch as d2l def corr2d_multi_in(X, K): return sum(d2l.corr2d(x, k) for x, k in zip(X, K)) X = torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]) K = torch.tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]]) corr2d_multi_in(X, K) def corr2d_multi_in_out(X, K): return torch.stack([corr2d_multi_in(X, k) for k in K], 0) K = torch.stack((K, K + 1, K + 2), 0) K.shape def corr2d_multi_in_out_1x1(X, K): c_i, h, w = X.shape c_o = K.shape[0] X = X.reshape((c_i, h * w)) K = K.reshape((c_o, c_i)) Y = torch.matmul(K, X) return Y.reshape((c_o, h, w)) X = torch.normal(0, 1, (3, 3, 3)) K = torch.normal(0, 1, (2, 3, 1, 1)) Y1 = corr2d_multi_in_out_1x1(X, K) Y2 = corr2d_multi_in_out(X, K) assert float(torch.abs(Y1 - Y2).sum()) < 1e-6
null
null
500
import tensorflow as tf def pool2d(X, pool_size, mode='max'): p_h, p_w = pool_size Y = tf.Variable(tf.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w +1))) for i in range(Y.shape[0]): for j in range(Y.shape[1]): if mode == 'max': Y[i, j].assign(tf.reduce_max(X[i: i + p_h, j: j + p_w])) elif mode =='avg': Y[i, j].assign(tf.reduce_mean(X[i: i + p_h, j: j + p_w])) return Y X = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) pool2d(X, (2, 2)) X = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1)) pool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3]) pool2d(X) paddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]]) X_padded = tf.pad(X, paddings, "CONSTANT") pool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid', strides=2) pool2d(X_padded) paddings = tf.constant([[0, 0], [0, 0], [1, 1], [0, 0]]) X_padded = tf.pad(X, paddings, "CONSTANT") pool2d = tf.keras.layers.MaxPool2D(pool_size=[2, 3], padding='valid', strides=(2, 3)) pool2d(X_padded) X = tf.concat([X, X + 1], 3) paddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]]) X_padded = tf.pad(X, paddings, "CONSTANT") pool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid', strides=2) pool2d(X_padded)
import torch from torch import nn from d2l import torch as d2l def pool2d(X, pool_size, mode='max'): p_h, p_w = pool_size Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): if mode == 'max': Y[i, j] = X[i: i + p_h, j: j + p_w].max() elif mode == 'avg': Y[i, j] = X[i: i + p_h, j: j + p_w].mean() return Y X = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) pool2d(X, (2, 2)) X = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4)) pool2d = nn.MaxPool2d(3) pool2d(X) pool2d = nn.MaxPool2d(3, padding=1, stride=2) pool2d(X) pool2d = nn.MaxPool2d((2, 3), stride=(2, 3), padding=(0, 1)) pool2d(X) X = torch.cat((X, X + 1), 1) pool2d = nn.MaxPool2d(3, padding=1, stride=2) pool2d(X)
null
null