# NOTE(review): the four lines below were GitHub page chrome captured by the
# scrape, not part of the module; kept as comments so the file stays valid.
# Skip to content
# Switch branches/tags
# Go to file
# Cannot retrieve contributors at this time
# This file is generated automatically through:
# d2lbook build lib
# Don't edit it directly
import sys
d2l = sys.modules[__name__]
# Defined in file: ./chapter_preface/
from IPython import display
import collections
import os
import sys
import numpy as np
import math
from matplotlib import pyplot as plt
from mxnet import nd, autograd, gluon, init, context, image
from mxnet.gluon import nn, rnn
import random
import re
import time
import tarfile
import zipfile
# Defined in file: ./chapter_crashcourse/
def use_svg_display():
    """Use the svg format to display plots in Jupyter.

    NOTE(review): the body was lost in extraction; restored from the d2l
    library, which switches IPython's matplotlib output format to svg.
    """
    display.set_matplotlib_formats('svg')
# Defined in file: ./chapter_crashcourse/
def set_figsize(figsize=(3.5, 2.5)):
    """Change the default matplotlib figure size (and switch to svg output).

    Parameters
    ----------
    figsize : tuple of float
        (width, height) in inches, stored into ``plt.rcParams``.
    """
    # Restored (lost in extraction): upstream d2l also enables svg here.
    use_svg_display()
    plt.rcParams['figure.figsize'] = figsize
# Defined in file: ./chapter_crashcourse/
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
    """Plot a list of images in a num_rows x num_cols grid and return the axes.

    NOTE(review): the loop body was lost in extraction; restored from the d2l
    library (draw each image, hide axis ticks, set optional titles).
    """
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for i, (ax, img) in enumerate(zip(axes, imgs)):
        ax.imshow(img.asnumpy())
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[i])
    return axes
# Defined in file: ./chapter_linear-networks/
class Timer(object):
    """Record and aggregate multiple wall-clock running times."""

    def __init__(self):
        # Completed intervals, in seconds, most recent last.
        self.times = []
        # Restored (lost in extraction): upstream d2l starts timing on
        # construction — callers such as train_epoch_ch8 call ``stop()``
        # without a prior ``start()`` and rely on this.
        self.start()

    def start(self):
        """Start (or restart) the timer."""
        self.start_time = time.time()

    def stop(self):
        """Stop the timer, record the elapsed interval, and return it."""
        self.times.append(time.time() - self.start_time)
        return self.times[-1]

    def avg(self):
        """Return the average of the recorded intervals."""
        return sum(self.times) / len(self.times)

    def sum(self):
        """Return the total of the recorded intervals."""
        return sum(self.times)

    def cumsum(self):
        """Return the running cumulative totals of the recorded intervals."""
        return np.array(self.times).cumsum().tolist()
# Defined in file: ./chapter_linear-networks/
def plot(X, Y=None, xlabel=None, ylabel=None, legend=[], xlim=None,
         ylim=None, xscale='linear', yscale='linear', fmts=None,
         figsize=(3.5, 2.5), axes=None):
    """Plot multiple lines on one axes, normalizing the many accepted
    input shapes (NDArray or list, single line or list of lines)."""
    axes = axes if axes else d2l.plt.gca()
    if isinstance(X, nd.NDArray): X = X.asnumpy()
    if isinstance(Y, nd.NDArray): Y = Y.asnumpy()
    if not hasattr(X[0], "__len__"): X = [X]
    if Y is None: X, Y = [[]] * len(X), X
    if not hasattr(Y[0], "__len__"): Y = [Y]
    if len(X) != len(Y): X = X * len(Y)
    if not fmts: fmts = ['-'] * len(X)
    for x, y, fmt in zip(X, Y, fmts):
        if isinstance(x, nd.NDArray): x = x.asnumpy()
        if isinstance(y, nd.NDArray): y = y.asnumpy()
        if len(x):
            axes.plot(x, y, fmt)
        else:
            # Restored (lost in extraction): without x coordinates, plot y
            # against its index — previously both calls ran unconditionally.
            axes.plot(y, fmt)
    set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
# Defined in file: ./chapter_linear-networks/
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
    """A utility function to configure matplotlib axes.

    NOTE(review): the setter calls were lost in extraction; restored from the
    d2l library (labels, scales, limits, optional legend, grid).
    """
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    axes.set_xscale(xscale)
    axes.set_yscale(yscale)
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    if legend:
        axes.legend(legend)
    axes.grid()
# Defined in file: ./chapter_linear-networks/
def synthetic_data(w, b, num_examples):
"""generate y = X w + b + noise"""
X = nd.random.normal(scale=1, shape=(num_examples, len(w)))
y =, w) + b
y += nd.random.normal(scale=0.01, shape=y.shape)
return X, y
# Defined in file: ./chapter_linear-networks/
def linreg(X, w, b):
return, w) + b
# Defined in file: ./chapter_linear-networks/
def squared_loss(y_hat, y):
    """Halved squared error, elementwise: (y_hat - y)**2 / 2."""
    # Reshape the labels so broadcasting matches the predictions exactly.
    residual = y_hat - y.reshape(y_hat.shape)
    return residual ** 2 / 2
# Defined in file: ./chapter_linear-networks/
def sgd(params, lr, batch_size):
    """Mini-batch stochastic gradient descent, updating `params` in place.

    Each parameter must expose a ``.grad`` array; the gradient is averaged
    over the batch via division by `batch_size`.
    """
    for p in params:
        p[:] = p - lr * p.grad / batch_size
# Defined in file: ./chapter_linear-networks/
def load_array(data_arrays, batch_size, is_train=True):
"""Construct a Gluon data loader"""
dataset =*data_arrays)
return, batch_size, shuffle=is_train)
# Defined in file: ./chapter_linear-networks/
def get_fashion_mnist_labels(labels):
    """Map numeric Fashion-MNIST class indices to their text names."""
    text_labels = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    return [text_labels[int(label)] for label in labels]
# Defined in file: ./chapter_linear-networks/
def get_dataloader_workers(num_workers=4):
    """Return the number of DataLoader worker processes to use.

    Returns 0 on Windows (0 means no additional process is used to speed up
    the reading of data), otherwise `num_workers`.
    """
    return 0 if sys.platform.startswith('win') else num_workers
# Defined in file: ./chapter_linear-networks/
def load_data_fashion_mnist(batch_size, resize=None):
"""Download the Fashion-MNIST dataset and then load into memory."""
dataset =
trans = [dataset.transforms.Resize(resize)] if resize else []
trans = dataset.transforms.Compose(trans)
mnist_train = dataset.FashionMNIST(train=True).transform_first(trans)
mnist_test = dataset.FashionMNIST(train=False).transform_first(trans)
return (, batch_size, shuffle=True,
num_workers=get_dataloader_workers()),, batch_size, shuffle=False,
# Defined in file: ./chapter_linear-networks/
def accuracy(y_hat, y):
    """Count correctly classified examples in one batch.

    `y_hat` holds per-class scores; the prediction is the argmax over axis 1.
    """
    preds = y_hat.argmax(axis=1)
    correct = preds == y.astype('float32')
    return correct.sum().asscalar()
# Defined in file: ./chapter_linear-networks/
def evaluate_accuracy(net, data_iter):
    """Compute the classification accuracy of `net` over `data_iter`."""
    metric = Accumulator(2)  # num corrected examples, num examples
    for X, y in data_iter:
        labels = y.astype('float32')
        metric.add(accuracy(net(X), labels), labels.size)
    return metric[0] / metric[1]
# Defined in file: ./chapter_linear-networks/
class Accumulator(object):
"""Sum a list of numbers over time"""
def __init__(self, n): = [0.0] * n
def add(self, *args): = [a+b for a, b in zip(, args)]
def reset(self): = [0] * len(
def __getitem__(self, i):
# Defined in file: ./chapter_linear-networks/
def train_epoch_ch3(net, train_iter, loss, updater):
    """Run one training epoch; return (avg loss, avg accuracy)."""
    metric = Accumulator(3)  # train_loss_sum, train_acc_sum, num_examples
    if isinstance(updater, gluon.Trainer):
        updater = updater.step
    for X, y in train_iter:
        # Compute gradients and update parameters.
        with autograd.record():
            y_hat = net(X)
            l = loss(y_hat, y)
        # Restored (lost in extraction): backprop and the parameter update.
        l.backward()
        updater(X.shape[0])
        metric.add(l.sum().asscalar(), accuracy(y_hat, y), y.size)
    # Return training loss and training accuracy.
    return metric[0] / metric[2], metric[1] / metric[2]
# Defined in file: ./chapter_linear-networks/
class Animator(object):
    """Incrementally draw multiple lines in a single animated figure."""

    def __init__(self, xlabel=None, ylabel=None, legend=[], xlim=None,
                 ylim=None, xscale='linear', yscale='linear', fmts=None,
                 nrows=1, ncols=1, figsize=(3.5, 2.5)):
        """Incrementally plot multiple lines."""
        self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1: self.axes = [self.axes,]
        # Use a lambda to capture the axis-configuration arguments.
        self.config_axes = lambda: d2l.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, y):
        """Add multiple data points into the figure and redraw it."""
        if not hasattr(y, "__len__"): y = [y]
        n = len(y)
        if not hasattr(x, "__len__"): x = [x] * n
        if not self.X: self.X = [[] for _ in range(n)]
        if not self.Y: self.Y = [[] for _ in range(n)]
        if not self.fmts: self.fmts = ['-'] * n
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                # Restored (lost in extraction): accumulate the new point.
                self.X[i].append(a)
                self.Y[i].append(b)
        # Restored (lost in extraction): clear and redraw all lines, then
        # refresh the notebook display.
        self.axes[0].cla()
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        display.clear_output(wait=True)
# Defined in file: ./chapter_linear-networks/
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
    """Train `net` for `num_epochs`, animating loss and accuracy curves.

    Removed the unused ``trains, test_accs`` locals (dead assignments).
    """
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs],
                        ylim=[0.3, 0.9],
                        legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
# Defined in file: ./chapter_linear-networks/
def predict_ch3(net, test_iter, n=6):
    """Predict labels for one test batch and display the first n images.

    NOTE(review): the ``break`` after the first batch was lost in extraction;
    restored from the d2l library — only the first batch is visualized.
    """
    for X, y in test_iter:
        break
    trues = d2l.get_fashion_mnist_labels(y.asnumpy())
    preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1).asnumpy())
    titles = [true + '\n' + pred for true, pred in zip(trues, preds)]
    d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])
# Defined in file: ./chapter_multilayer-perceptrons/
def evaluate_loss(net, data_iter, loss):
    """Evaluate the average loss of a model on the given dataset."""
    metric = d2l.Accumulator(2)  # loss sum, example count
    for X, y in data_iter:
        batch_loss = loss(net(X), y)
        metric.add(batch_loss.sum().asscalar(), y.size)
    return metric[0] / metric[1]
# Defined in file: ./chapter_deep-learning-computation/
def try_gpu(i=0):
    """Return gpu(i) if it exists, otherwise return cpu()."""
    if context.num_gpus() >= i + 1:
        return context.gpu(i)
    return context.cpu()
# Defined in file: ./chapter_deep-learning-computation/
def try_all_gpus():
    """Return all available GPU contexts, or [cpu()] when none exist."""
    num = context.num_gpus()
    if num == 0:
        return [context.cpu()]
    return [context.gpu(i) for i in range(num)]
# Defined in file: ./chapter_convolutional-neural-networks/
def corr2d(X, K):
    """Compute the 2D cross-correlation of input X with kernel K."""
    kh, kw = K.shape
    out_h = X.shape[0] - kh + 1
    out_w = X.shape[1] - kw + 1
    Y = nd.zeros((out_h, out_w))
    for r in range(out_h):
        for c in range(out_w):
            # Elementwise product of the kernel with the current window.
            window = X[r: r + kh, c: c + kw]
            Y[r, c] = (window * K).sum()
    return Y
# Defined in file: ./chapter_convolutional-neural-networks/
def evaluate_accuracy_gpu(net, data_iter, ctx=None):
    """Compute accuracy of `net` over `data_iter` on device `ctx`.

    When `ctx` is omitted, the first device the first parameter lives on
    is used.
    """
    if not ctx:
        first_param = list(net.collect_params().values())[0]
        ctx = first_param.list_ctx()[0]
    metric = d2l.Accumulator(2)  # correct predictions, total examples
    for X, y in data_iter:
        X = X.as_in_context(ctx)
        y = y.as_in_context(ctx)
        metric.add(d2l.accuracy(net(X), y), y.size)
    return metric[0] / metric[1]
# Defined in file: ./chapter_convolutional-neural-networks/
def train_ch5(net, train_iter, test_iter, num_epochs, lr, ctx=d2l.try_gpu()):
    """Train and evaluate a model with softmax cross-entropy on one device.

    NOTE(review): the timer start/stop, ``l.backward()`` and
    ``trainer.step`` calls were lost in extraction; restored from the d2l
    library. Also fixed the 'exampes' typo in the final message.
    """
    net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(),
                            'sgd', {'learning_rate': lr})
    animator = d2l.Animator(xlabel='epoch', xlim=[0, num_epochs],
                            legend=['train loss', 'train acc', 'test acc'])
    timer = d2l.Timer()
    for epoch in range(num_epochs):
        metric = d2l.Accumulator(3)  # train_loss, train_acc, num_examples
        for i, (X, y) in enumerate(train_iter):
            timer.start()
            # Here is the only difference compared to train_epoch_ch3:
            # the data is moved to ctx first.
            X, y = X.as_in_context(ctx), y.as_in_context(ctx)
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y)
            l.backward()
            trainer.step(X.shape[0])
            metric.add(l.sum().asscalar(), d2l.accuracy(y_hat, y), X.shape[0])
            timer.stop()
            train_loss, train_acc = metric[0]/metric[2], metric[1]/metric[2]
            if (i + 1) % 50 == 0:
                animator.add(epoch + i/len(train_iter),
                             (train_loss, train_acc, None))
        test_acc = evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch + 1, (None, None, test_acc))
    print('loss %.3f, train acc %.3f, test acc %.3f' % (
        train_loss, train_acc, test_acc))
    print('%.1f examples/sec on %s' % (metric[2]*num_epochs/timer.sum(), ctx))
# Defined in file: ./chapter_convolutional-modern/
class Residual(nn.Block):
    """ResNet residual block: two 3x3 convs plus an identity (or 1x1
    projection) shortcut."""

    def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):
        super(Residual, self).__init__(**kwargs)
        # Restored (lost in extraction): conv1 and the optional conv3
        # projection must share `strides` so shapes match at the addition.
        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1,
                               strides=strides)
        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
        if use_1x1conv:
            self.conv3 = nn.Conv2D(num_channels, kernel_size=1,
                                   strides=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm()
        self.bn2 = nn.BatchNorm()

    def forward(self, X):
        Y = nd.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        return nd.relu(Y + X)
# Defined in file: ./chapter_recurrent-neural-networks/
def read_time_machine():
    """Load the time machine book into a list of cleaned sentences.

    Each line is lower-cased and every non-letter run becomes one space.
    """
    with open('../data/timemachine.txt', 'r') as f:
        raw_lines = f.readlines()
    return [re.sub('[^A-Za-z]+', ' ', raw.strip().lower())
            for raw in raw_lines]
# Defined in file: ./chapter_recurrent-neural-networks/
def tokenize(lines, token='word'):
    """Split each sentence into word or character tokens.

    Parameters
    ----------
    lines : list of str
    token : {'word', 'char'}

    Returns a list of token lists, or None (after printing an error) for an
    unrecognized token type. Fixed the 'unkown' typo in the error message.
    """
    if token == 'word':
        return [line.split(' ') for line in lines]
    elif token == 'char':
        return [list(line) for line in lines]
    print('ERROR: unknown token type ' + token)
# Defined in file: ./chapter_recurrent-neural-networks/
class Vocab(object):
    """Vocabulary mapping tokens <-> integer indices, ordered by frequency."""

    def __init__(self, tokens, min_freq=0, use_special_tokens=False):
        # Sort by token first so frequency ties break deterministically,
        # then (stable) sort by descending frequency.
        counter = count_corpus(tokens)
        self.token_freqs = sorted(counter.items(), key=lambda x: x[0])
        self.token_freqs.sort(key=lambda x: x[1], reverse=True)
        if use_special_tokens:
            # padding, begin of sentence, end of sentence, unknown
            self.pad, self.bos, self.eos, self.unk = (0, 1, 2, 3)
            uniq_tokens = ['<pad>', '<bos>', '<eos>', '<unk>']
        else:
            # Restored (lost in extraction): the non-special-token branch.
            self.unk, uniq_tokens = 0, ['<unk>']
        uniq_tokens += [token for token, freq in self.token_freqs
                        if freq >= min_freq and token not in uniq_tokens]
        self.idx_to_token, self.token_to_idx = [], dict()
        for token in uniq_tokens:
            # Restored (lost in extraction): without this append, every
            # token mapped to index -1.
            self.idx_to_token.append(token)
            self.token_to_idx[token] = len(self.idx_to_token) - 1

    def __len__(self):
        return len(self.idx_to_token)

    def __getitem__(self, tokens):
        if not isinstance(tokens, (list, tuple)):
            # Unknown tokens map to self.unk.
            return self.token_to_idx.get(tokens, self.unk)
        return [self.__getitem__(token) for token in tokens]

    def to_tokens(self, indices):
        if not isinstance(indices, (list, tuple)):
            return self.idx_to_token[indices]
        return [self.idx_to_token[index] for index in indices]
# Defined in file: ./chapter_recurrent-neural-networks/
def count_corpus(sentences):
    """Count token frequencies across a list of token lists."""
    # Flatten the nested token lists, then tally.
    flat = [token for sentence in sentences for token in sentence]
    return collections.Counter(flat)
# Defined in file: ./chapter_recurrent-neural-networks/
def load_corpus_time_machine(max_tokens=-1):
    """Return the time-machine text as a flat index list plus its Vocab.

    A positive `max_tokens` truncates the corpus to that many tokens.
    """
    tokens = tokenize(read_time_machine(), 'char')
    vocab = Vocab(tokens)
    corpus = [vocab[token] for line in tokens for token in line]
    if max_tokens > 0:
        corpus = corpus[:max_tokens]
    return corpus, vocab
# Defined in file: ./chapter_recurrent-neural-networks/
def seq_data_iter_random(corpus, batch_size, num_steps):
    """Yield random minibatches of (X, Y) subsequences from `corpus`.

    NOTE(review): the ``random.shuffle`` call was lost in extraction;
    restored from the d2l library — without it the "random" iterator
    yields examples in order.
    """
    # Offset the iterator over the data for uniform starts.
    corpus = corpus[random.randint(0, num_steps):]
    # Subtract 1 extra since we need to account for the label.
    num_examples = ((len(corpus) - 1) // num_steps)
    example_indices = list(range(0, num_examples * num_steps, num_steps))
    random.shuffle(example_indices)
    # Returns a sequence of length num_steps starting from pos.
    data = lambda pos: corpus[pos: pos + num_steps]
    # Discard half-empty batches.
    num_batches = num_examples // batch_size
    for i in range(0, batch_size * num_batches, batch_size):
        # batch_size indicates the random examples read each time.
        batch_indices = example_indices[i:(i + batch_size)]
        X = [data(j) for j in batch_indices]
        Y = [data(j + 1) for j in batch_indices]
        yield nd.array(X), nd.array(Y)
# Defined in file: ./chapter_recurrent-neural-networks/
def seq_data_iter_consecutive(corpus, batch_size, num_steps):
    """Yield consecutive minibatches of (X, Y) subsequences from `corpus`."""
    # Random offset so different epochs see different alignments.
    offset = random.randint(0, num_steps)
    # Keep only as many indices as fill whole batch rows (one extra token
    # is reserved for the shifted labels).
    num_indices = ((len(corpus) - offset - 1) // batch_size) * batch_size
    Xs = nd.array(corpus[offset: offset + num_indices])
    Ys = nd.array(corpus[offset + 1: offset + 1 + num_indices])
    Xs = Xs.reshape((batch_size, -1))
    Ys = Ys.reshape((batch_size, -1))
    num_batches = Xs.shape[1] // num_steps
    for start in range(0, num_batches * num_steps, num_steps):
        yield (Xs[:, start: start + num_steps],
               Ys[:, start: start + num_steps])
# Defined in file: ./chapter_recurrent-neural-networks/
class SeqDataLoader(object):
    """An iterator that loads minibatches of time-machine sequence data."""

    def __init__(self, batch_size, num_steps, use_random_iter, max_tokens):
        if use_random_iter:
            data_iter_fn = d2l.seq_data_iter_random
        else:
            # Restored (lost in extraction): the consecutive-sampling branch
            # previously overwrote the random one unconditionally.
            data_iter_fn = d2l.seq_data_iter_consecutive
        self.corpus, self.vocab = d2l.load_corpus_time_machine(max_tokens)
        # Capture the batching arguments in a closure.
        self.get_iter = lambda: data_iter_fn(self.corpus, batch_size,
                                             num_steps)

    def __iter__(self):
        return self.get_iter()
# Defined in file: ./chapter_recurrent-neural-networks/
def load_data_time_machine(batch_size, num_steps, use_random_iter=False,
                           max_tokens=10000):
    """Return an iterator over the time-machine corpus and its vocabulary.

    NOTE(review): the ``max_tokens=10000`` parameter was truncated in
    extraction; restored from the d2l library.
    """
    data_iter = SeqDataLoader(
        batch_size, num_steps, use_random_iter, max_tokens)
    return data_iter, data_iter.vocab
# Defined in file: ./chapter_recurrent-neural-networks/
class RNNModelScratch(object):
    """An RNN language model assembled from scratch-implemented pieces."""

    def __init__(self, vocab_size, num_hiddens, ctx,
                 get_params, init_state, forward):
        self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
        self.params = get_params(vocab_size, num_hiddens, ctx)
        self.init_state, self.forward_fn = init_state, forward

    def __call__(self, X, state):
        # One-hot encode with time as the leading axis.
        inputs = nd.one_hot(X.T, self.vocab_size)
        return self.forward_fn(inputs, state, self.params)

    def begin_state(self, batch_size, ctx):
        return self.init_state(batch_size, self.num_hiddens, ctx)
# Defined in file: ./chapter_recurrent-neural-networks/
def predict_ch8(prefix, num_predicts, model, vocab, ctx):
    """Generate `num_predicts` characters following `prefix`.

    NOTE(review): the two ``outputs.append`` lines were lost in extraction;
    restored from the d2l library — without them the function returned only
    the first prefix character.
    """
    state = model.begin_state(batch_size=1, ctx=ctx)
    outputs = [vocab[prefix[0]]]
    get_input = lambda: nd.array([outputs[-1]], ctx=ctx).reshape((1, 1))
    for y in prefix[1:]:  # Warm up the state with the prefix.
        _, state = model(get_input(), state)
        outputs.append(vocab[y])
    for _ in range(num_predicts):  # Predict num_predicts steps.
        Y, state = model(get_input(), state)
        outputs.append(int(Y.argmax(axis=1).reshape(1).asscalar()))
    return ''.join([vocab.idx_to_token[i] for i in outputs])
# Defined in file: ./chapter_recurrent-neural-networks/
def grad_clipping(model, theta):
if isinstance(model, gluon.Block):
params = [ for p in model.collect_params().values()]
params = model.params
norm = math.sqrt(sum((p.grad ** 2).sum().asscalar() for p in params))
if norm > theta:
for param in params:
param.grad[:] *= theta / norm
# Defined in file: ./chapter_recurrent-neural-networks/
def train_epoch_ch8(model, train_iter, loss, updater, ctx, use_random_iter):
    """Train an RNN model for one epoch; return (perplexity, tokens/sec).

    NOTE(review): the ``else:`` around the detach loop and the
    ``l.backward()`` call were lost in extraction; restored from the d2l
    library.
    """
    state, timer = None, d2l.Timer()
    metric = d2l.Accumulator(2)  # loss_sum, num_examples
    for X, Y in train_iter:
        if state is None or use_random_iter:
            # Initialize state when either it's the first iteration or
            # we are using random sampling.
            state = model.begin_state(batch_size=X.shape[0], ctx=ctx)
        else:
            # Detach the carried-over state to truncate backprop through time.
            for s in state:
                s.detach()
        y = Y.T.reshape((-1,))
        X, y = X.as_in_context(ctx), y.as_in_context(ctx)
        with autograd.record():
            py, state = model(X, state)
            l = loss(py, y).mean()
        l.backward()
        grad_clipping(model, 1)
        updater(batch_size=1)  # Since the loss is already a mean.
        metric.add(l.asscalar() * y.size, y.size)
    return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()
# Defined in file: ./chapter_recurrent-neural-networks/
def train_ch8(model, train_iter, vocab, lr, num_epochs, ctx,
              use_random_iter=False):
    """Train an RNN language model (chapter 8).

    NOTE(review): the ``use_random_iter=False`` parameter and the ``else:``
    separating the Gluon and scratch updaters were lost in extraction;
    restored from the d2l library.
    """
    # Initialize
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',
                            legend=['train'], xlim=[1, num_epochs])
    if isinstance(model, gluon.Block):
        model.initialize(ctx=ctx, force_reinit=True, init=init.Normal(0.01))
        trainer = gluon.Trainer(model.collect_params(),
                                'sgd', {'learning_rate': lr})
        updater = lambda batch_size: trainer.step(batch_size)
    else:
        updater = lambda batch_size: d2l.sgd(model.params, lr, batch_size)
    predict = lambda prefix: predict_ch8(prefix, 50, model, vocab, ctx)
    # Train and check the progress.
    for epoch in range(num_epochs):
        ppl, speed = train_epoch_ch8(
            model, train_iter, loss, updater, ctx, use_random_iter)
        if epoch % 10 == 0:
            print(predict('time traveller'))
            animator.add(epoch + 1, [ppl])
    print('Perplexity %.1f, %d tokens/sec on %s' % (ppl, speed, ctx))
    print(predict('time traveller'))
# Defined in file: ./chapter_recurrent-neural-networks/
class RNNModel(nn.Block):
    """A Gluon RNN layer plus an output projection to the vocabulary."""

    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.dense = nn.Dense(vocab_size)

    def forward(self, inputs, state):
        # One-hot encode with time as the leading axis.
        X = nd.one_hot(inputs.T, self.vocab_size)
        Y, state = self.rnn(X, state)
        # The dense layer first flattens Y to
        # (num_steps * batch_size, num_hiddens); its output shape is
        # (num_steps * batch_size, vocab_size).
        flat = Y.reshape((-1, Y.shape[-1]))
        return self.dense(flat), state

    def begin_state(self, *args, **kwargs):
        return self.rnn.begin_state(*args, **kwargs)
# Defined in file: ./chapter_recurrent-neural-networks/
def read_data_nmt():
fname ='')
with zipfile.ZipFile(fname, 'r') as f:
# Defined in file: ./chapter_recurrent-neural-networks/
def preprocess_nmt(text):
    """Normalize raw translation text.

    Replaces non-breaking spaces with plain spaces, lower-cases everything,
    and inserts a space before , ! . when not already preceded by one.
    """
    text = text.replace('\u202f', ' ').replace('\xa0', ' ')

    def needs_space(char, prev_char):
        return char in (',', '!', '.') and prev_char != ' '

    pieces = []
    for i, char in enumerate(text.lower()):
        if i > 0 and needs_space(char, text[i - 1]):
            pieces.append(' ' + char)
        else:
            pieces.append(char)
    return ''.join(pieces)
# Defined in file: ./chapter_recurrent-neural-networks/
def tokenize_nmt(text, num_examples=None):
    """Split tab-separated "source<TAB>target" lines into token lists.

    Returns (source, target), each a list of word-token lists. Lines without
    exactly one tab are skipped; `num_examples` bounds how many lines are read.
    """
    source, target = [], []
    for i, line in enumerate(text.split('\n')):
        if num_examples and i > num_examples:
            break
        parts = line.split('\t')
        if len(parts) == 2:
            src, tgt = parts
            source.append(src.split(' '))
            target.append(tgt.split(' '))
    return source, target
# Defined in file: ./chapter_recurrent-neural-networks/
def trim_pad(line, num_steps, padding_token):
    """Truncate or pad the token list `line` to exactly `num_steps` entries."""
    if len(line) >= num_steps:
        return line[:num_steps]  # trim
    shortfall = num_steps - len(line)
    return line + [padding_token] * shortfall  # pad
# Defined in file: ./chapter_recurrent-neural-networks/
def build_array(lines, vocab, num_steps, is_source):
    """Convert token lines into a padded nd.array plus per-line valid lengths."""
    lines = [vocab[l] for l in lines]
    if not is_source:
        # Target sequences get <bos>/<eos> markers.
        lines = [[vocab.bos] + l + [vocab.eos] for l in lines]
    padded = [trim_pad(l, num_steps, vocab.pad) for l in lines]
    array = nd.array(padded)
    # Valid length = number of non-padding tokens per line.
    valid_len = (array != vocab.pad).sum(axis=1)
    return array, valid_len
# Defined in file: ./chapter_recurrent-neural-networks/
def load_data_nmt(batch_size, num_steps, num_examples=1000):
    """Return source/target vocabularies and a minibatch iterator for fra-eng."""
    text = preprocess_nmt(read_data_nmt())
    source, target = tokenize_nmt(text, num_examples)
    src_vocab = d2l.Vocab(source, min_freq=3, use_special_tokens=True)
    tgt_vocab = d2l.Vocab(target, min_freq=3, use_special_tokens=True)
    src_array, src_valid_len = build_array(source, src_vocab, num_steps, True)
    tgt_array, tgt_valid_len = build_array(target, tgt_vocab, num_steps, False)
    arrays = (src_array, src_valid_len, tgt_array, tgt_valid_len)
    return src_vocab, tgt_vocab, d2l.load_array(arrays, batch_size)
# Defined in file: ./chapter_recurrent-neural-networks/
class Encoder(nn.Block):
    """The base encoder interface for the encoder-decoder architecture."""

    def __init__(self, **kwargs):
        super(Encoder, self).__init__(**kwargs)

    def forward(self, X):
        # Subclasses implement the actual encoding.
        raise NotImplementedError
# Defined in file: ./chapter_recurrent-neural-networks/
class Decoder(nn.Block):
    """The base decoder interface for the encoder-decoder architecture."""

    def __init__(self, **kwargs):
        super(Decoder, self).__init__(**kwargs)

    def init_state(self, enc_outputs, *args):
        # Subclasses derive the initial decoder state from encoder outputs.
        raise NotImplementedError

    def forward(self, X, state):
        raise NotImplementedError
# Defined in file: ./chapter_recurrent-neural-networks/
class EncoderDecoder(nn.Block):
    """Compose an encoder and a decoder into one trainable model."""

    def __init__(self, encoder, decoder, **kwargs):
        super(EncoderDecoder, self).__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, enc_X, dec_X, *args):
        enc_outputs = self.encoder(enc_X, *args)
        dec_state = self.decoder.init_state(enc_outputs, *args)
        return self.decoder(dec_X, dec_state)
# Defined in file: ./chapter_recurrent-neural-networks/
class Seq2SeqEncoder(d2l.Encoder):
    """RNN encoder: an embedding layer followed by a multi-layer LSTM."""

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
                 dropout=0, **kwargs):
        super(Seq2SeqEncoder, self).__init__(**kwargs)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = rnn.LSTM(num_hiddens, num_layers, dropout=dropout)

    def forward(self, X, *args):
        embedded = self.embedding(X)        # (batch, seq_len, embed_size)
        embedded = embedded.swapaxes(0, 1)  # the RNN wants time-major input
        state = self.rnn.begin_state(batch_size=embedded.shape[1],
                                     ctx=embedded.context)
        out, state = self.rnn(embedded, state)
        # out: (seq_len, batch, num_hiddens); state holds the last step's
        # hidden state and memory cell, each (num_layers, batch, num_hiddens).
        return out, state
# Defined in file: ./chapter_recurrent-neural-networks/
class Seq2SeqDecoder(d2l.Decoder):
    """RNN decoder: embedding, multi-layer LSTM, and a vocabulary projection."""

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
                 dropout=0, **kwargs):
        super(Seq2SeqDecoder, self).__init__(**kwargs)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = rnn.LSTM(num_hiddens, num_layers, dropout=dropout)
        self.dense = nn.Dense(vocab_size, flatten=False)

    def init_state(self, enc_outputs, *args):
        # The decoder starts from the encoder's final recurrent state.
        return enc_outputs[1]

    def forward(self, X, state):
        embedded = self.embedding(X).swapaxes(0, 1)  # time-major for the RNN
        out, state = self.rnn(embedded, state)
        # Swap batch back to the first dimension to simplify loss computation.
        out = self.dense(out).swapaxes(0, 1)
        return out, state
# Defined in file: ./chapter_recurrent-neural-networks/
class MaskedSoftmaxCELoss(gluon.loss.SoftmaxCELoss):
    """Softmax cross-entropy that ignores positions past each valid length.

    pred: (batch_size, seq_len, vocab_size)
    label: (batch_size, seq_len)
    valid_length: (batch_size,)
    """

    def forward(self, pred, label, valid_length):
        # Per-position sample weights, shape (batch_size, seq_len, 1):
        # 1 inside the valid prefix, 0 afterwards.
        weights = nd.ones_like(label).expand_dims(axis=-1)
        weights = nd.SequenceMask(weights, valid_length, True, axis=1)
        return super(MaskedSoftmaxCELoss, self).forward(pred, label, weights)
# Defined in file: ./chapter_recurrent-neural-networks/
def train_s2s_ch8(model, data_iter, lr, num_epochs, ctx):
    """Train a sequence-to-sequence model.

    NOTE(review): the ``l.backward()`` and ``trainer.step`` calls were lost
    in extraction; restored from the d2l library.
    """
    model.initialize(init.Xavier(), force_reinit=True, ctx=ctx)
    trainer = gluon.Trainer(model.collect_params(),
                            'adam', {'learning_rate': lr})
    loss = MaskedSoftmaxCELoss()
    animator = d2l.Animator(xlabel='epoch', ylabel='loss',
                            xlim=[1, num_epochs], ylim=[0, 0.25])
    for epoch in range(1, num_epochs + 1):
        timer = d2l.Timer()
        metric = d2l.Accumulator(2)  # loss_sum, num_tokens
        for batch in data_iter:
            X, X_vlen, Y, Y_vlen = [x.as_in_context(ctx) for x in batch]
            # Teacher forcing: decoder input is the target shifted right.
            Y_input, Y_label, Y_vlen = Y[:, :-1], Y[:, 1:], Y_vlen - 1
            with autograd.record():
                Y_hat, _ = model(X, Y_input, X_vlen, Y_vlen)
                l = loss(Y_hat, Y_label, Y_vlen)
            l.backward()
            d2l.grad_clipping(model, 1)
            num_tokens = Y_vlen.sum().asscalar()
            trainer.step(num_tokens)
            metric.add(l.sum().asscalar(), num_tokens)
        if epoch % 10 == 0:
            animator.add(epoch, metric[0] / metric[1])
    print('loss %.3f, %d tokens/sec on %s ' % (
        metric[0] / metric[1], metric[1] / timer.stop(), ctx))
# Defined in file: ./chapter_recurrent-neural-networks/
def predict_s2s_ch8(model, src_sentence, src_vocab, tgt_vocab, num_steps, ctx):
    """Greedy-decode a translation for one source sentence.

    NOTE(review): the ``break``/``predict_tokens.append`` lines were lost in
    extraction (the visible code returned an empty string only on eos);
    restored from the d2l library.
    """
    src_tokens = src_vocab[src_sentence.lower().split(' ')]
    enc_valid_length = nd.array([len(src_tokens)], ctx=ctx)
    src_tokens = d2l.trim_pad(src_tokens, num_steps, src_vocab.pad)
    enc_X = nd.array(src_tokens, ctx=ctx)
    # Add the batch_size dimension.
    enc_outputs = model.encoder(enc_X.expand_dims(axis=0), enc_valid_length)
    dec_state = model.decoder.init_state(enc_outputs, enc_valid_length)
    dec_X = nd.array([tgt_vocab.bos], ctx=ctx).expand_dims(axis=0)
    predict_tokens = []
    for _ in range(num_steps):
        Y, dec_state = model.decoder(dec_X, dec_state)
        # The token with the highest score becomes the next step's input.
        dec_X = Y.argmax(axis=2)
        py = dec_X.squeeze(axis=0).astype('int32').asscalar()
        if py == tgt_vocab.eos:
            break
        predict_tokens.append(py)
    return ' '.join(tgt_vocab.to_tokens(predict_tokens))
# Defined in file: ./chapter_attention-mechanism/
def masked_softmax(X, valid_length):
    """Softmax over the last axis of 3-D X, masking positions past
    valid_length.

    NOTE(review): the ``else:`` between the 1-D repeat and the reshape was
    lost in extraction; restored from the d2l library (a 2-D valid_length is
    flattened instead of repeated).
    """
    # X: 3-D tensor, valid_length: 1-D or 2-D tensor
    if valid_length is None:
        return X.softmax()
    shape = X.shape
    if valid_length.ndim == 1:
        # One length per batch row: repeat it for every query position.
        valid_length = valid_length.repeat(shape[1], axis=0)
    else:
        valid_length = valid_length.reshape((-1,))
    # Fill masked elements with a large negative, whose exp is 0.
    masked = nd.SequenceMask(X.reshape((-1, shape[-1])), valid_length, True,
                             axis=1, value=-1e6)
    return masked.softmax().reshape(shape)
# Defined in file: ./chapter_attention-mechanism/
class DotProductAttention(nn.Block):
    """Scaled dot-product attention with dropout on the attention weights."""

    def __init__(self, dropout, **kwargs):
        super(DotProductAttention, self).__init__(**kwargs)
        self.dropout = nn.Dropout(dropout)

    # query: (batch_size, #queries, d)
    # key: (batch_size, #kv_pairs, d)
    # value: (batch_size, #kv_pairs, dim_v)
    # valid_length: either (batch_size, ) or (batch_size, xx)
    def forward(self, query, key, value, valid_length=None):
        d = query.shape[-1]
        # transpose_b=True swaps the last two dimensions of key.
        scores = nd.batch_dot(query, key, transpose_b=True) / math.sqrt(d)
        weights = self.dropout(masked_softmax(scores, valid_length))
        return nd.batch_dot(weights, value)
# Defined in file: ./chapter_attention-mechanism/
class MLPAttention(nn.Block):
    """Additive (MLP) attention: score = v . tanh(W_k q + W_q k)."""

    def __init__(self, units, dropout, **kwargs):
        super(MLPAttention, self).__init__(**kwargs)
        # flatten=False keeps query's and key's 3-D shapes.
        self.W_k = nn.Dense(units, activation='tanh',
                            use_bias=False, flatten=False)
        self.W_q = nn.Dense(units, activation='tanh',
                            use_bias=False, flatten=False)
        self.v = nn.Dense(1, use_bias=False, flatten=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, query, key, value, valid_length):
        # NOTE(review): W_k is applied to the query and W_q to the key; the
        # naming looks swapped but this matches the original code — the
        # result is equivalent since both are learned projections.
        query, key = self.W_k(query), self.W_q(key)
        # Broadcast-add query (batch, #queries, 1, units) with
        # key (batch, 1, #kv_pairs, units).
        features = query.expand_dims(axis=2) + key.expand_dims(axis=1)
        scores = self.v(features).squeeze(axis=-1)
        weights = self.dropout(masked_softmax(scores, valid_length))
        return nd.batch_dot(weights, value)
# Defined in file: ./chapter_optimization/
def annotate(text, xy, xytext):
    """Annotate the current axes with an arrow from `xytext` to `xy`.

    NOTE(review): the ``arrowprops`` argument line was lost in extraction;
    restored from the d2l library.
    """
    d2l.plt.gca().annotate(text, xy=xy, xytext=xytext,
                           arrowprops=dict(arrowstyle='->'))
# Defined in file: ./chapter_optimization/
def train_2d(trainer):
    """Optimize a 2-dim objective with a customized trainer; return the trace.

    `trainer` maps (x1, x2, s1, s2) to the next (x1, x2, s1, s2); s1 and s2
    are internal state variables used later in the chapter.
    """
    x1, x2, s1, s2 = -5, -2, 0, 0
    results = [(x1, x2)]
    for step in range(20):
        x1, x2, s1, s2 = trainer(x1, x2, s1, s2)
        results.append((x1, x2))
    print('epoch %d, x1 %f, x2 %f' % (step + 1, x1, x2))
    return results
# Defined in file: ./chapter_optimization/
def show_trace_2d(f, results):
    """Show the trace of 2D variables during optimization over contours of f."""
    d2l.set_figsize((3.5, 2.5))
    d2l.plt.plot(*zip(*results), '-o', color='#ff7f0e')
    grid1 = np.arange(-5.5, 1.0, 0.1)
    grid2 = np.arange(-3.0, 1.0, 0.1)
    x1, x2 = np.meshgrid(grid1, grid2)
    d2l.plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')
# Defined in file: ./chapter_optimization/
def get_data_ch10(batch_size=10, n=1500):
    """Load the airfoil-noise dataset, normalized per column, as a data iter.

    Returns (data_iter, feature_dim); the last column is the target.
    """
    raw = np.genfromtxt('../data/airfoil_self_noise.dat', delimiter='\t')
    normalized = nd.array((raw - raw.mean(axis=0)) / raw.std(axis=0))
    data_iter = d2l.load_array((normalized[:n, :-1], normalized[:n, -1]),
                               batch_size, is_train=True)
    return data_iter, normalized.shape[1] - 1
# Defined in file: ./chapter_optimization/
def train_ch10(trainer_fn, states, hyperparams, data_iter,
               feature_dim, num_epochs=2):
    """Train a linear-regression model with a from-scratch optimizer.

    NOTE(review): the ``attach_grad`` calls, ``l.backward()`` and the
    timer/animator bookkeeping were lost in extraction; restored from the
    d2l library.
    """
    # Initialization
    w = nd.random.normal(scale=0.01, shape=(feature_dim, 1))
    b = nd.zeros(1)
    w.attach_grad()
    b.attach_grad()
    net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss
    # Train
    animator = d2l.Animator(xlabel='epoch', ylabel='loss',
                            xlim=[0, num_epochs], ylim=[0.22, 0.35])
    n, timer = 0, d2l.Timer()
    for _ in range(num_epochs):
        for X, y in data_iter:
            with autograd.record():
                l = loss(net(X), y).mean()
            l.backward()
            trainer_fn([w, b], states, hyperparams)
            n += X.shape[0]
            if n % 200 == 0:
                timer.stop()
                animator.add(n / X.shape[0] / len(data_iter),
                             (d2l.evaluate_loss(net, data_iter, loss),))
                timer.start()
    print('loss: %.3f, %.3f sec/epoch' % (animator.Y[0][-1], timer.avg()))
    return timer.cumsum(), animator.Y[0]
# Defined in file: ./chapter_optimization/
def train_gluon_ch10(trainer_name, trainer_hyperparams,
                     data_iter, num_epochs=2):
    """Train linear regression with a built-in Gluon optimizer.

    NOTE(review): the Dense layer, initialization, ``l.backward()``,
    ``trainer.step`` and the timer/animator bookkeeping were lost in
    extraction; restored from the d2l library.
    """
    # Initialization
    net = nn.Sequential()
    net.add(nn.Dense(1))
    net.initialize(init.Normal(sigma=0.01))
    trainer = gluon.Trainer(
        net.collect_params(), trainer_name, trainer_hyperparams)
    loss = gluon.loss.L2Loss()
    animator = d2l.Animator(xlabel='epoch', ylabel='loss',
                            xlim=[0, num_epochs], ylim=[0.22, 0.35])
    n, timer = 0, d2l.Timer()
    for _ in range(num_epochs):
        for X, y in data_iter:
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            trainer.step(X.shape[0])
            n += X.shape[0]
            if n % 200 == 0:
                timer.stop()
                animator.add(n / X.shape[0] / len(data_iter),
                             (d2l.evaluate_loss(net, data_iter, loss),))
                timer.start()
    print('loss: %.3f, %.3f sec/epoch' % (animator.Y[0][-1], timer.avg()))
# Defined in file: ./chapter_computational-performance/
def split_batch(X, y, ctx_list):
    """Split X and y across the devices in `ctx_list`."""
    assert X.shape[0] == y.shape[0]
    features = gluon.utils.split_and_load(X, ctx_list)
    labels = gluon.utils.split_and_load(y, ctx_list)
    return features, labels
# Defined in file: ./chapter_computational-performance/
def resnet18(num_classes):
    """A slightly modified ResNet-18 model.

    NOTE(review): the ``blk.add`` calls and the ``else:`` branch inside
    resnet_block were lost in extraction; restored from the d2l library.
    """
    def resnet_block(num_channels, num_residuals, first_block=False):
        # Stack Residual units; the first unit of every stage except the
        # first downsamples with a strided 1x1 projection.
        blk = nn.Sequential()
        for i in range(num_residuals):
            if i == 0 and not first_block:
                blk.add(d2l.Residual(num_channels, use_1x1conv=True,
                                     strides=2))
            else:
                blk.add(d2l.Residual(num_channels))
        return blk

    net = nn.Sequential()
    # This model uses a smaller convolution kernel, stride, and padding and
    # removes the maximum pooling layer.
    net.add(nn.Conv2D(64, kernel_size=3, strides=1, padding=1),
            nn.BatchNorm(), nn.Activation('relu'))
    net.add(resnet_block(64, 2, first_block=True),
            resnet_block(128, 2),
            resnet_block(256, 2),
            resnet_block(512, 2))
    net.add(nn.GlobalAvgPool2D(), nn.Dense(num_classes))
    return net
# Defined in file: ./chapter_computational-performance/
def evaluate_accuracy_gpus(net, data_iter):
    """Compute the accuracy of `net` on `data_iter` using multiple GPUs."""
    # Query the list of devices from the network's own parameters.
    ctx_list = list(net.collect_params().values())[0].list_ctx()
    metric = d2l.Accumulator(2)  # num_corrected_examples, num_examples
    for features, labels in data_iter:
        Xs, ys = d2l.split_batch(features, labels, ctx_list)
        pys = [net(X) for X in Xs]  # run in parallel
        metric.add(sum(d2l.accuracy(py, y) for py, y in zip(pys, ys)),
                   labels.size)
    return metric[0]/metric[1]
# Defined in file: ./chapter_computer-vision/
def train_batch_ch12(net, features, labels, loss, trainer, ctx_list):
    """Train `net` for one minibatch scattered over `ctx_list` devices.

    Returns (sum of losses, sum of correct predictions) for the batch.
    """
    Xs, ys = d2l.split_batch(features, labels, ctx_list)
    with autograd.record():
        pys = [net(X) for X in Xs]
        ls = [loss(py, y) for py, y in zip(pys, ys)]
    # Backpropagate each device's loss, then take one optimizer step
    # normalized by the full batch size.
    for l in ls:
        l.backward()
    trainer.step(features.shape[0])
    train_loss_sum = sum([l.sum().asscalar() for l in ls])
    train_acc_sum = sum(d2l.accuracy(py, y) for py, y in zip(pys, ys))
    return train_loss_sum, train_acc_sum
# Defined in file: ./chapter_computer-vision/
def train_ch12(net, train_iter, test_iter, loss, trainer, num_epochs,
               ctx_list=d2l.try_all_gpus()):
    """Train `net` on multiple GPUs, plotting loss/accuracy per epoch.

    NOTE: the default `ctx_list` is evaluated once at import time,
    matching the d2l library's convention.
    """
    num_batches, timer = len(train_iter), d2l.Timer()
    animator = d2l.Animator(xlabel='epoch', xlim=[0,num_epochs], ylim=[0,2],
                            legend=['train loss','train acc','test acc'])
    for epoch in range(num_epochs):
        # store training_loss, training_accuracy, num_examples, num_features
        metric = d2l.Accumulator(4)
        for i, (features, labels) in enumerate(train_iter):
            timer.start()
            l, acc = train_batch_ch12(
                net, features, labels, loss, trainer, ctx_list)
            metric.add(l, acc, labels.shape[0], labels.size)
            timer.stop()
            # Plot roughly 5 points per epoch.
            if (i+1) % (num_batches // 5) == 0:
                animator.add(epoch + i/num_batches,
                             (metric[0]/metric[2], metric[1]/metric[3], None))
        test_acc = d2l.evaluate_accuracy_gpus(net, test_iter)
        animator.add(epoch+1, (None, None, test_acc))
    print('loss %.3f, train acc %.3f, test acc %.3f' % (
        metric[0]/metric[2], metric[1]/metric[3], test_acc))
    print('%.1f examples/sec on %s' % (
        metric[2]*num_epochs/timer.sum(), ctx_list))
# Defined in file: ./chapter_computer-vision/
def bbox_to_rect(bbox, color):
    """Convert bounding box to matplotlib format."""
    # The bounding box comes in as (top-left x, top-left y,
    # bottom-right x, bottom-right y); matplotlib wants
    # ((upper-left x, upper-left y), width, height).
    x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
    return d2l.plt.Rectangle(
        xy=(x1, y1), width=x2 - x1, height=y2 - y1,
        fill=False, edgecolor=color, linewidth=2)
# Defined in file: ./chapter_computer-vision/
def show_bboxes(axes, bboxes, labels=None, colors=None):
    """Show bounding boxes.

    Draws each box in `bboxes` on the matplotlib `axes`, cycling through
    `colors` and optionally annotating each box with the matching label.
    """
    def _make_list(obj, default_values=None):
        # Normalize a scalar/None argument to a list.
        if obj is None:
            obj = default_values
        elif not isinstance(obj, (list, tuple)):
            obj = [obj]
        return obj
    labels = _make_list(labels)
    colors = _make_list(colors, ['b', 'g', 'r', 'm', 'c'])
    for i, bbox in enumerate(bboxes):
        color = colors[i % len(colors)]
        rect = d2l.bbox_to_rect(bbox.asnumpy(), color)
        # Without add_patch the rectangle is never rendered.
        axes.add_patch(rect)
        if labels and len(labels) > i:
            # Pick a text color that contrasts with the box color.
            text_color = 'k' if color == 'w' else 'w'
            axes.text(rect.xy[0], rect.xy[1], labels[i],
                      va='center', ha='center', fontsize=9, color=text_color,
                      bbox=dict(facecolor=color, lw=0))
# Defined in file: ./chapter_computer-vision/
def download_pikachu(data_dir):
    """Download the pikachu object-detection dataset into `data_dir`."""
    root_url = ('https://apache-mxnet.s3-accelerate.amazonaws.com/'
                'gluon/dataset/pikachu/')
    # File name -> expected SHA-1 hash, used to skip re-downloads.
    dataset = {'train.rec': 'e6bcb6ffba1ac04ff8a9b1115e650af56ee969c8',
               'train.idx': 'dcf7318b2602c06428b9988470c731621716c393',
               'val.rec': 'd6c33f799b4d058e82f2cb5bd9a976f69d72d520'}
    for k, v in dataset.items():
        gluon.utils.download(
            root_url + k, os.path.join(data_dir, k), sha1_hash=v)
# Defined in file: ./chapter_computer-vision/
def load_data_pikachu(batch_size, edge_size=256):
    """Load the pikachu dataset"""
    data_dir = '../data/pikachu'
    train_iter = image.ImageDetIter(
        path_imgrec=os.path.join(data_dir, 'train.rec'),
        path_imgidx=os.path.join(data_dir, 'train.idx'),
        batch_size=batch_size,  # required; was missing in the broken copy
        data_shape=(3, edge_size, edge_size),  # The shape of the output image
        shuffle=True,  # Read the data set in random order
        rand_crop=1,  # The probability of random cropping is 1
        min_object_covered=0.95, max_attempts=200)
    val_iter = image.ImageDetIter(
        path_imgrec=os.path.join(data_dir, 'val.rec'), batch_size=batch_size,
        data_shape=(3, edge_size, edge_size), shuffle=False)
    return train_iter, val_iter
# Defined in file: ./chapter_computer-vision/
def download_voc_pascal(data_dir='../data'):
    """Download the VOC2012 segmentation dataset."""
    voc_dir = os.path.join(data_dir, 'VOCdevkit/VOC2012')
    url = ('http://host.robots.ox.ac.uk/pascal/VOC/voc2012'
           '/VOCtrainval_11-May-2012.tar')
    sha1 = '4e443f8a2eca6b1dac8a6c57641b67dd40621a49'
    fname = gluon.utils.download(url, data_dir, sha1_hash=sha1)
    # Unpack the tarball next to where it was downloaded.
    with tarfile.open(fname, 'r') as f:
        f.extractall(data_dir)
    return voc_dir
# Defined in file: ./chapter_computer-vision/
def read_voc_images(root='../data/VOCdevkit/VOC2012', is_train=True):
    """Read all VOC feature and label images.

    Returns two parallel lists: the JPEG input images and the PNG
    segmentation masks for the train or validation split.
    """
    txt_fname = '%s/ImageSets/Segmentation/%s' % (
        root, 'train.txt' if is_train else 'val.txt')
    with open(txt_fname, 'r') as f:
        # One image name per whitespace-separated token.
        images = f.read().split()
    features, labels = [None] * len(images), [None] * len(images)
    for i, fname in enumerate(images):
        features[i] = image.imread('%s/JPEGImages/%s.jpg' % (root, fname))
        labels[i] = image.imread(
            '%s/SegmentationClass/%s.png' % (root, fname))
    return features, labels
# Defined in file: ./chapter_computer-vision/
# RGB color used to paint each of the 21 VOC segmentation classes in the
# label images; the position of a color in this list is the class index
# it encodes (aligned with VOC_CLASSES).
VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
                [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
                [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
                [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
                [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
                [0, 64, 128]]
# Defined in file: ./chapter_computer-vision/
# Human-readable names of the 21 VOC segmentation classes, aligned
# index-for-index with VOC_COLORMAP.
VOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
               'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
               'diningtable', 'dog', 'horse', 'motorbike', 'person',
               'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']
# Defined in file: ./chapter_computer-vision/
def build_colormap2label():
    """Build a RGB color to label mapping for segmentation."""
    # One slot per possible 24-bit RGB value; unknown colors map to 0.
    colormap2label = nd.zeros(256 ** 3)
    for label, (r, g, b) in enumerate(VOC_COLORMAP):
        colormap2label[(r * 256 + g) * 256 + b] = label
    return colormap2label
# Defined in file: ./chapter_computer-vision/
def voc_label_indices(colormap, colormap2label):
    """Map a RGB color to a label."""
    cm = colormap.astype('int32')
    # Flatten each pixel's (R, G, B) into a single 24-bit key.
    idx = (cm[:, :, 0] * 256 + cm[:, :, 1]) * 256 + cm[:, :, 2]
    return colormap2label[idx]
# Defined in file: ./chapter_computer-vision/
def voc_rand_crop(feature, label, height, width):
    """Randomly crop for both feature and label images."""
    # Crop the feature randomly, then crop the label with the exact same
    # rectangle so the pixels stay aligned.
    feature, crop_rect = image.random_crop(feature, (width, height))
    return feature, image.fixed_crop(label, *crop_rect)
# Defined in file: ./chapter_computer-vision/
class VOCSegDataset(gluon.data.Dataset):
    """A customized dataset to load VOC dataset."""

    def __init__(self, is_train, crop_size, voc_dir):
        # Per-channel normalization statistics (ImageNet convention —
        # matches the values used elsewhere in d2l).
        self.rgb_mean = nd.array([0.485, 0.456, 0.406])
        self.rgb_std = nd.array([0.229, 0.224, 0.225])
        self.crop_size = crop_size
        features, labels = read_voc_images(root=voc_dir, is_train=is_train)
        # Discard images smaller than the crop size, then normalize.
        self.features = [self.normalize_image(feature)
                         for feature in self.filter(features)]
        self.labels = self.filter(labels)
        self.colormap2label = build_colormap2label()
        print('read ' + str(len(self.features)) + ' examples')

    def normalize_image(self, img):
        # Scale to [0, 1] and standardize per channel.
        return (img.astype('float32') / 255 - self.rgb_mean) / self.rgb_std

    def filter(self, imgs):
        # Keep only images at least as large as the crop window.
        return [img for img in imgs if (
            img.shape[0] >= self.crop_size[0] and
            img.shape[1] >= self.crop_size[1])]

    def __getitem__(self, idx):
        feature, label = voc_rand_crop(self.features[idx], self.labels[idx],
                                       *self.crop_size)
        # HWC -> CHW for the feature; map label colors to class indices.
        return (feature.transpose((2, 0, 1)),
                voc_label_indices(label, self.colormap2label))

    def __len__(self):
        return len(self.features)
# Defined in file: ./chapter_computer-vision/
def load_data_voc(batch_size, crop_size):
    """Download and load the VOC2012 semantic dataset.

    Returns (train_iter, test_iter) DataLoaders producing crops of
    `crop_size` in minibatches of `batch_size`.
    """
    voc_dir = d2l.download_voc_pascal()
    num_workers = d2l.get_dataloader_workers()
    train_iter = gluon.data.DataLoader(
        VOCSegDataset(True, crop_size, voc_dir), batch_size,
        shuffle=True, last_batch='discard', num_workers=num_workers)
    test_iter = gluon.data.DataLoader(
        VOCSegDataset(False, crop_size, voc_dir), batch_size,
        last_batch='discard', num_workers=num_workers)
    return train_iter, test_iter
# Defined in file: ./chapter_natural-language-processing/
def read_ptb():
with zipfile.ZipFile('../data/', 'r') as f:
raw_text ='ptb/ptb.train.txt').decode("utf-8")
return [line.split() for line in raw_text.split('\n')]
# Defined in file: ./chapter_natural-language-processing/
def subsampling(sentences, vocab):
    """Subsample frequent tokens, dropping each with prob 1 - sqrt(t/f)."""
    # Map low frequency words into <unk>
    sentences = [[vocab.idx_to_token[vocab[tk]] for tk in line]
                 for line in sentences]
    # Count the frequency for each word
    counter = d2l.count_corpus(sentences)
    num_tokens = sum(counter.values())

    def keep(token):
        # Keep a token with probability sqrt(1e-4 / relative frequency);
        # frequent words are therefore dropped more often.
        return random.uniform(0, 1) < math.sqrt(
            1e-4 / counter[token] * num_tokens)

    # Now do the subsampling.
    return [[tk for tk in line if keep(tk)] for line in sentences]
# Defined in file: ./chapter_natural-language-processing/
def get_centers_and_contexts(corpus, max_window_size):
    """Return all center words and their context words for skip-gram.

    For every position i of every line, a window size is sampled
    uniformly from [1, max_window_size]; the words inside the window,
    excluding the center word itself, form its context.
    """
    centers, contexts = [], []
    for line in corpus:
        # Each sentence needs at least 2 words to form a
        # "central target word - context word" pair
        if len(line) < 2:
            continue
        centers += line
        for i in range(len(line)):  # Context window centered at i
            window_size = random.randint(1, max_window_size)
            indices = list(range(max(0, i - window_size),
                                 min(len(line), i + 1 + window_size)))
            # Exclude the central target word from the context words
            indices.remove(i)
            contexts.append([line[idx] for idx in indices])
    return centers, contexts
# Defined in file: ./chapter_natural-language-processing/
class RandomGenerator(object):
    """Draw a random int in [0, n-1] according to n sampling weights.

    (The previous docstring claimed the range was [0, n] — the population
    is range(len(sampling_weights)), so n itself is never drawn.)
    Samples are pre-generated in batches of 10000 to amortize the cost
    of random.choices.
    """
    def __init__(self, sampling_weights):
        self.population = list(range(len(sampling_weights)))
        self.sampling_weights = sampling_weights
        self.candidates = []   # cached batch of pre-drawn samples
        self.i = 0             # cursor into the cache

    def draw(self):
        if self.i == len(self.candidates):
            # Cache exhausted (or first call): refill it.
            self.candidates = random.choices(
                self.population, self.sampling_weights, k=10000)
            self.i = 0
        self.i += 1
        return self.candidates[self.i-1]
# Defined in file: ./chapter_natural-language-processing/
def get_negatives(all_contexts, corpus, K):
    """Sample K noise (negative) words per context word.

    Noise words are drawn with probability proportional to
    frequency**0.75 and may not coincide with the current context words.
    """
    counter = d2l.count_corpus(corpus)
    sampling_weights = [counter[i]**0.75 for i in range(len(counter))]
    all_negatives, generator = [], RandomGenerator(sampling_weights)
    for contexts in all_contexts:
        negatives = []
        while len(negatives) < len(contexts) * K:
            neg = generator.draw()
            # Noise words cannot be context words
            if neg not in contexts:
                negatives.append(neg)
        all_negatives.append(negatives)
    return all_negatives
# Defined in file: ./chapter_natural-language-processing/
def batchify(data):
    """Pad (center, context, negative) triples into fixed-width arrays.

    Returns (centers, contexts_negatives, masks, labels) where masks
    marks real entries vs padding and labels marks context words vs
    negatives/padding.
    """
    max_len = max(len(c) + len(n) for _, c, n in data)
    centers, contexts_negatives, masks, labels = [], [], [], []
    for center, context, negative in data:
        cur_len = len(context) + len(negative)
        pad = max_len - cur_len
        centers.append(center)
        contexts_negatives.append(context + negative + [0] * pad)
        masks.append([1] * cur_len + [0] * pad)
        labels.append([1] * len(context) + [0] * (max_len - len(context)))
    return (nd.array(centers).reshape((-1, 1)), nd.array(contexts_negatives),
            nd.array(masks), nd.array(labels))
# Defined in file: ./chapter_natural-language-processing/
def load_data_ptb(batch_size, max_window_size, num_noise_words):
    """Build the PTB skip-gram data iterator and its vocabulary."""
    sentences = read_ptb()
    vocab = d2l.Vocab(sentences, min_freq=10)
    subsampled = subsampling(sentences, vocab)
    corpus = [vocab[line] for line in subsampled]
    all_centers, all_contexts = get_centers_and_contexts(
        corpus, max_window_size)
    all_negatives = get_negatives(all_contexts, corpus, num_noise_words)
    dataset = gluon.data.ArrayDataset(
        all_centers, all_contexts, all_negatives)
    # batchify pads the variable-length contexts/negatives per minibatch.
    data_iter = gluon.data.DataLoader(dataset, batch_size, shuffle=True,
                                      batchify_fn=batchify)
    return data_iter, vocab
# Defined in file: ./chapter_natural-language-processing/
def download_imdb(data_dir='../data'):
    """Download and extract the IMDb review dataset into `data_dir`."""
    url = ('http://ai.stanford.edu/~amaas/data/sentiment/'
           'aclImdb_v1.tar.gz')
    fname = gluon.utils.download(url, data_dir)
    with tarfile.open(fname, 'r') as f:
        f.extractall(data_dir)
# Defined in file: ./chapter_natural-language-processing/
def read_imdb(folder='train', data_dir='../data'):
data, labels = [], []
for label in ['pos', 'neg']:
folder_name = os.path.join(data_dir, 'aclImdb', folder, label)
for file in os.listdir(folder_name):
with open(os.path.join(folder_name, file), 'rb') as f:
review ='utf-8').replace('\n', '')
labels.append(1 if label == 'pos' else 0)
return data, labels
# Defined in file: ./chapter_natural-language-processing/
def load_data_imdb(batch_size, num_steps=500):
    """Return (train_iter, test_iter, vocab) for the IMDb dataset."""
    train_data, test_data = read_imdb('train'), read_imdb('test')
    train_tokens = d2l.tokenize(train_data[0], token='word')
    test_tokens = d2l.tokenize(test_data[0], token='word')
    vocab = d2l.Vocab(train_tokens, min_freq=5)
    # Truncate/pad every review to exactly num_steps tokens.
    train_features = nd.array([d2l.trim_pad(vocab[line], num_steps, vocab.unk)
                               for line in train_tokens])
    test_features = nd.array([d2l.trim_pad(vocab[line], num_steps, vocab.unk)
                              for line in test_tokens])
    train_iter = d2l.load_array((train_features, train_data[1]), batch_size)
    # Do not shuffle the evaluation data.
    test_iter = d2l.load_array((test_features, test_data[1]), batch_size,
                               is_train=False)
    return train_iter, test_iter, vocab
# Defined in file: ./chapter_natural-language-processing/
def predict_sentiment(net, vocab, sentence):
    """Classify the sentiment of `sentence` with a trained `net`."""
    tokens = nd.array(vocab[sentence.split()], ctx=d2l.try_gpu())
    # Run the tokens through the net as a batch of one sequence.
    pred = nd.argmax(net(tokens.reshape((1, -1))), axis=1)
    return 'positive' if pred.asscalar() == 1 else 'negative'
# Defined in file: ./chapter_generative_adversarial_networks/
def update_D(X, Z, net_D, net_G, loss, trainer_D):
    """Update discriminator: one step on real batch X and noise Z.

    Returns the summed discriminator loss as a Python float.
    """
    batch_size = X.shape[0]
    ones = nd.ones((batch_size,), ctx=X.context)
    zeros = nd.zeros((batch_size,), ctx=X.context)
    with autograd.record():
        real_Y = net_D(X)
        fake_X = net_G(Z)
        # Don't need to compute gradient for net_G, detach it from
        # computing gradients.
        fake_Y = net_D(fake_X.detach())
        loss_D = (loss(real_Y, ones) + loss(fake_Y, zeros)) / 2
    # Backpropagate and update only the discriminator's parameters.
    loss_D.backward()
    trainer_D.step(batch_size)
    return loss_D.sum().asscalar()
# Defined in file: ./chapter_generative_adversarial_networks/
def update_G(Z, net_D, net_G, loss, trainer_G):  # saved in d2l
    """Update generator: one step pushing D's output on fakes toward 1.

    Returns the summed generator loss as a Python float.
    """
    batch_size = Z.shape[0]
    ones = nd.ones((batch_size,), ctx=Z.context)
    with autograd.record():
        # We could reuse fake_X from update_D to save computation.
        fake_X = net_G(Z)
        # Recomputing fake_Y is needed since net_D is changed.
        fake_Y = net_D(fake_X)
        loss_G = loss(fake_Y, ones)
    # Backpropagate and update only the generator's parameters.
    loss_G.backward()
    trainer_G.step(batch_size)
    return loss_G.sum().asscalar()