I. Concise Implementation of Linear Regression with PyTorch
Generating the dataset
import numpy as np
import torch

num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
# features: 1000 samples drawn from a standard normal distribution
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
# labels follow the true linear model plus a small Gaussian noise term
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
Reading the dataset
import torch.utils.data as Data

batch_size = 10

# combine features and labels into one dataset
dataset = Data.TensorDataset(features, labels)

# put the dataset into a DataLoader
data_iter = Data.DataLoader(
    dataset=dataset,        # torch TensorDataset format
    batch_size=batch_size,  # mini-batch size
    shuffle=True,           # whether to shuffle the data
    num_workers=2,          # number of worker processes used to read data
)
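To confirm that the DataLoader yields mini-batches of the expected shape, a minimal check (not part of the original snippet) is to pull one batch and print its sizes:

# X should have shape (batch_size, num_inputs), y should have shape (batch_size,)
for X, y in data_iter:
    print(X.shape, y.shape)
    break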
Defining the model
import torch.nn as nn

class LinearNet(nn.Module):
    def __init__(self, n_feature):
        super(LinearNet, self).__init__()      # call the parent class constructor
        self.linear = nn.Linear(n_feature, 1)  # function prototype: `torch.nn.Linear(in_features, out_features, bias=True)`

    def forward(self, x):
        y = self.linear(x)
        return y

net = LinearNet(num_inputs)
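The same single-layer network can also be built with nn.Sequential; this short sketch is only meant to show that a Sequential container lets you reach the layer by index (net_seq[0]), whereas the custom LinearNet above exposes it as net.linear:

# equivalent construction with nn.Sequential; the Linear layer is then net_seq[0]
net_seq = nn.Sequential(
    nn.Linear(num_inputs, 1)
)
print(net_seq[0])  # Linear(in_features=2, out_features=1, bias=True)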
Initializing model parameters
from torch.nn import init

# net is a custom Module here, so access the layer as net.linear
# (indexing with net[0] only works for an nn.Sequential container)
init.normal_(net.linear.weight, mean=0.0, std=0.01)
init.constant_(net.linear.bias, val=0.0)
Defining the loss function and the optimizer
loss = nn.MSELoss()
import torch.optim as optim
optimizer = optim.SGD(net.parameters(), lr=0.03)  # built-in mini-batch stochastic gradient descent
print(optimizer)
Training
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        l = loss(output, y.view(-1, 1))
        optimizer.zero_grad()  # reset gradients, equivalent to net.zero_grad()
        l.backward()
        optimizer.step()
    print('epoch %d, loss: %f' % (epoch, l.item()))

dense = net.linear
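To check the training result, the learned weight and bias can be compared with the true_w and true_b used to generate the data (a minimal check, assuming training has converged):

# the learned parameters should be close to the generating ones
print(true_w, dense.weight.data)
print(true_b, dense.bias.data)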
II. Concise Implementation of Recurrent Neural Networks with PyTorch
nn.RNN
We use nn.RNN in PyTorch to construct the recurrent neural network.
Pay particular attention to the following constructor parameters of nn.RNN:
input_size – The number of expected features in the input x
hidden_size – The number of features in the hidden state h
nonlinearity – The non-linearity to use. Can be either 'tanh' or 'relu'. Default: 'tanh'
batch_first – If True, the input and output tensors are provided as (batch_size, num_steps, input_size). Default: False
Here batch_first determines the shape of the input. We use the default value False, so the corresponding input shape is (num_steps, batch_size, input_size).
The parameters of the forward function are:
input of shape (num_steps, batch_size, input_size): tensor containing the features of the input sequence.
h_0 of shape (num_layers * num_directions, batch_size, hidden_size): tensor containing the initial hidden state for each element in the batch. Defaults to zero if not provided. If the RNN is bidirectional, num_directions should be 2, else it should be 1.
The return values of the forward function are listed below; a small shape check follows the list.
output of shape (num_steps, batch_size, num_directions * hidden_size): tensor containing the output features (h_t) from the last layer of the RNN, for each t.
h_n of shape (num_layers * num_directions, batch_size, hidden_size): tensor containing the hidden state for t = num_steps.
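A small sketch to confirm these shapes; the concrete sizes below are chosen only for illustration:

import torch
import torch.nn as nn

num_steps, batch_size, vocab_size, num_hiddens = 35, 2, 1027, 256
rnn_layer = nn.RNN(input_size=vocab_size, hidden_size=num_hiddens)

X = torch.rand(num_steps, batch_size, vocab_size)  # (num_steps, batch_size, input_size)
Y, state_new = rnn_layer(X, None)                  # initial hidden state defaults to zeros
print(Y.shape)          # torch.Size([35, 2, 256]) -> (num_steps, batch_size, hidden_size)
print(state_new.shape)  # torch.Size([1, 2, 256])  -> (num_layers * num_directions, batch_size, hidden_size)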
Defining the model
Construction of the RNN model:
class RNNModel(nn.Module):
    def __init__(self, rnn_layer, vocab_size):
        super(RNNModel, self).__init__()
        self.rnn = rnn_layer
        self.hidden_size = rnn_layer.hidden_size * (2 if rnn_layer.bidirectional else 1)
        self.vocab_size = vocab_size
        self.dense = nn.Linear(self.hidden_size, vocab_size)

    def forward(self, inputs, state):
        # inputs.shape: (batch_size, num_steps)
        X = to_onehot(inputs, self.vocab_size)  # list of num_steps tensors, each (batch_size, vocab_size)
        X = torch.stack(X)  # X.shape: (num_steps, batch_size, vocab_size)
        hiddens, state = self.rnn(X, state)
        hiddens = hiddens.view(-1, hiddens.shape[-1])  # hiddens.shape: (num_steps * batch_size, hidden_size)
        output = self.dense(hiddens)
        return output, state
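The forward pass relies on a to_onehot helper from the accompanying d2l code, which is not shown in this section. A minimal sketch of what it is assumed to do (turn each time step of an index tensor into a one-hot matrix) could look like this:

import torch.nn.functional as F

def to_onehot(X, n_class):
    # X.shape: (batch_size, num_steps); returns a list of num_steps tensors,
    # each of shape (batch_size, n_class)
    return [F.one_hot(X[:, i].long(), n_class).float() for i in range(X.shape[1])]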
Construction of the prediction function:
def predict_rnn_pytorch(prefix, num_chars, model, vocab_size, device, idx_to_char,
                        char_to_idx):
    state = None
    output = [char_to_idx[prefix[0]]]  # output holds the prefix plus the num_chars predicted characters
    for t in range(num_chars + len(prefix) - 1):
        X = torch.tensor([output[-1]], device=device).view(1, 1)
        (Y, state) = model(X, state)  # the forward pass does not need the model parameters passed in
        if t < len(prefix) - 1:
            output.append(char_to_idx[prefix[t + 1]])
        else:
            output.append(Y.argmax(dim=1).item())
    return ''.join([idx_to_char[i] for i in output])
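Even before training, the model can be used for (essentially random) prediction. A hedged usage example, assuming rnn_layer, vocab_size, device, idx_to_char and char_to_idx have been prepared as in the accompanying d2l code:

model = RNNModel(rnn_layer, vocab_size).to(device)
print(predict_rnn_pytorch('分開', 10, model, vocab_size, device, idx_to_char, char_to_idx))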
Construction of the training function:
def train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device,
                                  corpus_indices, idx_to_char, char_to_idx,
                                  num_epochs, num_steps, lr, clipping_theta,
                                  batch_size, pred_period, pred_len, prefixes):
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model.to(device)
    for epoch in range(num_epochs):
        l_sum, n, start = 0.0, 0, time.time()
        data_iter = d2l.data_iter_consecutive(corpus_indices, batch_size, num_steps, device)  # consecutive sampling
        state = None
        for X, Y in data_iter:
            if state is not None:
                # use detach to separate the hidden state from the computation graph
                if isinstance(state, tuple):  # LSTM, state: (h, c)
                    state[0].detach_()
                    state[1].detach_()
                else:
                    state.detach_()
            (output, state) = model(X, state)  # output.shape: (num_steps * batch_size, vocab_size)
            y = torch.flatten(Y.T)
            l = loss(output, y.long())
            optimizer.zero_grad()
            l.backward()
            grad_clipping(model.parameters(), clipping_theta, device)
            optimizer.step()
            l_sum += l.item() * y.shape[0]
            n += y.shape[0]
        if (epoch + 1) % pred_period == 0:
            print('epoch %d, perplexity %f, time %.2f sec' % (
                epoch + 1, math.exp(l_sum / n), time.time() - start))
            for prefix in prefixes:
                print(' -', predict_rnn_pytorch(
                    prefix, pred_len, model, vocab_size, device, idx_to_char,
                    char_to_idx))
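The training loop calls grad_clipping, which is defined in the accompanying d2l code rather than in this section. A minimal sketch of the clipping-by-global-norm it is assumed to perform:

def grad_clipping(params, theta, device):
    # clip gradients so that their combined L2 norm does not exceed theta
    params = list(params)  # params may be a generator (e.g. model.parameters())
    norm = torch.tensor([0.0], device=device)
    for param in params:
        norm += (param.grad.data ** 2).sum()
    norm = norm.sqrt().item()
    if norm > theta:
        for param in params:
            param.grad.data *= (theta / norm)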
Training the model
num_epochs, batch_size, lr, clipping_theta = 250, 32, 1e-3, 1e-2
pred_period, pred_len, prefixes = 50, 50, ['分開', '不分開']
train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device,
                              corpus_indices, idx_to_char, char_to_idx,
                              num_epochs, num_steps, lr, clipping_theta,
                              batch_size, pred_period, pred_len, prefixes)