Introduction
Code Walkthrough
- Linear regression: y = Wx + b, where W is the weight and b is the bias.
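For reference, the loss computed by mean_square below is L = sum((W*x_i + b - y_i)^2) / (2*n_samples), so its gradients are dL/dW = sum((W*x_i + b - y_i) * x_i) / n_samples and dL/db = sum(W*x_i + b - y_i) / n_samples; each SGD step then moves W and b by -learning_rate times these gradients. In the code this differentiation is carried out automatically by tf.GradientTape rather than by hand.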
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Learning rate
learning_rate = 0.01
# Number of training steps
training_steps = 1000
display_step = 50

# Training data
X = np.array([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
              7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
Y = np.array([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
              2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
# Number of training samples
n_samples = X.shape[0]

# Randomly initialize the weight and bias
W = tf.Variable(np.random.randn(), name="weight")
b = tf.Variable(np.random.randn(), name="bias")

# Linear regression (Wx + b)
def linear_regression(x):
    return W * x + b

# Mean squared error
def mean_square(y_pred, y_true):
    return tf.reduce_sum(tf.pow(y_pred - y_true, 2)) / (2 * n_samples)

# Stochastic gradient descent optimizer
optimizer = tf.optimizers.SGD(learning_rate)

# Optimization step
def run_optimization():
    # Wrap the computation inside a GradientTape for automatic differentiation
    with tf.GradientTape() as g:
        pred = linear_regression(X)
        loss = mean_square(pred, Y)
    # Compute the gradients
    gradients = g.gradient(loss, [W, b])
    # Update W and b following the gradients
    optimizer.apply_gradients(zip(gradients, [W, b]))

# Run training for the given number of steps
for step in range(1, training_steps + 1):
    # Run one optimization step to update W and b
    run_optimization()
    if step % display_step == 0:
        pred = linear_regression(X)
        loss = mean_square(pred, Y)
        print("step: %i, loss: %f, W: %f, b: %f" % (step, loss, W.numpy(), b.numpy()))

# Plot the results
plt.plot(X, Y, 'ro', label='Original data')
plt.plot(X, np.array(W * X + b), label='Fitted line')
plt.legend()
plt.show()
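As a quick sanity check (not part of the original listing), the gradient-descent fit can be compared against NumPy's closed-form least-squares solution; this minimal sketch assumes X, Y, W and b from the code above are still in scope:

# Closed-form least-squares line through the same data (degree-1 polynomial fit)
w_ls, b_ls = np.polyfit(X, Y, 1)
print("closed form:      W: %f, b: %f" % (w_ls, b_ls))
print("gradient descent: W: %f, b: %f" % (W.numpy(), b.numpy()))
# The two results should agree closely after 1000 training steps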
- Another version fixes the weight and bias in the training data: y = x * 0.1 + 0.3, i.e. weight 0.1 and bias 0.3.
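Because y_data is generated without any noise, the loss mean((Weights * x_data + biases - y_data)^2) reaches its minimum value of 0 exactly at Weights = 0.1 and biases = 0.3, which is what training should recover (see the printed results after the code).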
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Learning rate
learning_rate = 0.05
# Number of training steps
training_steps = 1000
display_step = 20

# Training data
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3  # weight 0.1, bias 0.3

# Randomly initialize the weight and bias
Weights = tf.Variable(tf.random.uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))

# Stochastic gradient descent optimizer
optimizer = tf.optimizers.SGD(learning_rate)

# Optimization step
def run_optimization():
    # Wrap the computation inside a GradientTape for automatic differentiation
    with tf.GradientTape() as g:
        # Predicted y
        y = Weights * x_data + biases
        # Mean squared error loss
        loss = tf.reduce_mean(tf.square(y - y_data))
    # Compute the gradients
    gradients = g.gradient(loss, [Weights, biases])
    # Update Weights and biases following the gradients
    optimizer.apply_gradients(zip(gradients, [Weights, biases]))

# Run training for the given number of steps
for step in range(1, training_steps + 1):
    # Run one optimization step to update Weights and biases
    run_optimization()
    if step % display_step == 0:
        # Predicted y
        y = Weights * x_data + biases
        # Mean squared error loss
        loss = tf.reduce_mean(tf.square(y - y_data))
        print("step: %i, loss: %f, W: %f, b: %f" % (step, loss, Weights.numpy(), biases.numpy()))

# Plot the results
plt.plot(x_data, y_data, 'ro', label='Original data')
plt.plot(x_data, np.array(Weights * x_data + biases), label='Fitted line')
plt.legend()
plt.show()
- The printed results show that the weight converges toward 0.1 and the bias toward 0.3:
step: 20, loss: 0.031721, W: -0.491671, b: 0.560534
step: 40, loss: 0.021747, W: -0.396559, b: 0.561665
step: 60, loss: 0.016477, W: -0.331112, b: 0.530155
step: 80, loss: 0.012491, W: -0.275286, b: 0.500551
...
step: 920, loss: 0.000000, W: 0.098881, b: 0.300598
step: 940, loss: 0.000000, W: 0.099026, b: 0.300521
step: 960, loss: 0.000000, W: 0.099152, b: 0.300453
step: 980, loss: 0.000000, W: 0.099261, b: 0.300395
step: 1000, loss: 0.000000, W: 0.099357, b: 0.300344
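To make explicit what tf.GradientTape computes here, the same fit can be written with hand-derived gradients. This is only an illustrative plain-NumPy sketch (w_manual and b_manual are made-up names) and assumes x_data, y_data, learning_rate and training_steps from the listing above are still in scope:

# Gradient descent with hand-derived gradients for loss = mean((w*x + b - y)**2)
w_manual, b_manual = 0.0, 0.0
for _ in range(training_steps):
    err = w_manual * x_data + b_manual - y_data
    grad_w = 2.0 * np.mean(err * x_data)  # d(loss)/dw
    grad_b = 2.0 * np.mean(err)           # d(loss)/db
    w_manual -= learning_rate * grad_w
    b_manual -= learning_rate * grad_b
print("manual gradients: W: %f, b: %f" % (w_manual, b_manual))
# Like the TensorFlow version, this converges toward W = 0.1, b = 0.3 on the noise-free data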
Other