# Reference: Morvan Zhou's (莫煩) PyTorch tutorial
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
class Net(torch.nn.Module):
    """A minimal two-layer fully-connected regression network.

    Layer sizes are parameterized (previously hard-coded to 1 -> 10 -> 1);
    the defaults preserve the original behavior, so existing callers of
    ``Net()`` are unaffected.
    """

    def __init__(self, n_features=1, n_hidden=10, n_output=1):
        super().__init__()
        # nn.Linear works on the last dimension only; the batch dimension
        # is handled implicitly, so it is not part of the layer definition.
        self.l1 = torch.nn.Linear(n_features, n_hidden)
        self.l2 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """Apply hidden layer + ReLU, then the linear output layer."""
        x = F.relu(self.l1(x))
        x = self.l2(x)
        return x
if __name__ == "__main__":
    # 1. Prepare data: y = x^2 plus uniform noise in [0, 0.2).
    # float32 dtype so torch.from_numpy yields a FloatTensor (the dtype
    # nn.Linear expects by default).
    x_np = np.linspace(-1, 1, 100, dtype=np.float32)
    x = torch.unsqueeze(torch.from_numpy(x_np), dim=1)  # (100,) -> (100, 1): first dim is the batch
    y = x ** 2 + 0.2 * torch.rand(x.size())
    # NOTE: torch.autograd.Variable is deprecated since PyTorch 0.4 —
    # plain tensors carry autograd state, so no wrapping is needed.
    # plt.scatter(x.numpy(), y.numpy())
    # plt.show()

    # 2. Build the network
    neural_net = Net()
    # print(neural_net)  # inspect the layer structure

    # 3. Train the network
    optimizer = torch.optim.SGD(neural_net.parameters(), lr=0.5)
    loss_F = torch.nn.MSELoss()

    plt.ion()  # interactive mode: plot() draws immediately, no show() needed
    for t in range(100):
        # nn.Module overloads __call__, which dispatches to forward()
        prediction = neural_net(x)
        loss = loss_F(prediction, y)

        if t % 10 == 0:
            # Redraw the fit every 10 steps
            plt.cla()  # clear the current axes before redrawing
            plt.scatter(x.numpy(), y.numpy())
            # prediction requires grad -> detach before converting to numpy
            plt.plot(x.numpy(), prediction.detach().numpy(), color="red", linewidth=2.0)
            plt.text(0.5, 0.1, "loss: {:.5f}".format(loss.item()))
            plt.pause(0.1)  # without a pause only the final frame would be visible

        optimizer.zero_grad()  # gradients accumulate by default; reset each step
        loss.backward()        # populate .grad on all parameters
        optimizer.step()       # apply the SGD update

    plt.ioff()
    plt.show()  # blocking: keeps the final figure window open