Author: Tyan
Blog: noahsnail.com | CSDN | 簡書
This post introduces some basic usage of Keras.
- Demo
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
# Create the data: 200 evenly spaced points in [-1, 1]
X = np.linspace(-1, 1, 200)
# Shuffle the data
np.random.shuffle(X)
# Create the targets from a known linear relation (W = 0.5, b = 2) and add Gaussian noise
Y = 0.5 * X + 2 + np.random.normal(0, 0.05, (200,))
# Plot the data
plt.scatter(X, Y)
plt.show()
# Split into training data and test data
X_train, Y_train = X[:160], Y[:160]
X_test, Y_test = X[160:], Y[160:]
# Build the neural network with Keras
# Sequential is a linear stack of layers
# Dense is a fully connected layer
# Define the model
model = Sequential()
# Define the first layer; since this is a linear regression model, one layer is enough
model.add(Dense(units=1, input_dim=1))
# Choose the loss function and optimization method
model.compile(loss='mse', optimizer='sgd')
print('----Training----')
# Training loop
for step in range(501):
    # Train on one batch and return the loss (cost)
    cost = model.train_on_batch(X_train, Y_train)
    if step % 100 == 0:
        print('loss: ', cost)
print('----Testing----')
# Evaluate on the test set after training
cost = model.evaluate(X_test, Y_test, batch_size=40)
print('test loss: ', cost)
# Get the learned parameters
W, b = model.layers[0].get_weights()
print('Weights: ', W)
print('Biases: ', b)
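As a small extension that is not part of the original demo, the fitted line can be drawn over the test data with model.predict; a minimal sketch, assuming the model, X_test and Y_test defined above are still in scope:
# Predict on the test inputs and overlay the fitted line on the test scatter
Y_pred = model.predict(X_test)
plt.scatter(X_test, Y_test)
plt.plot(X_test, Y_pred, 'r-')
plt.show()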
- Result
----Training----
loss: 3.97799
loss: 0.100697
loss: 0.0118289
loss: 0.00448105
loss: 0.00278243
loss: 0.00232763
----Testing----
40/40 [==============================] - 0s
test loss: 0.00307717337273
Weights: [[ 0.47406867]]
Biases: [ 1.99442744]
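The learned weight and bias are close to the true values (0.5 and 2) used to generate the data, up to the added noise. As a side note, the manual train_on_batch loop can also be expressed with Keras' built-in fit; a rough sketch under the same setup, where batch_size=160 makes each epoch correspond to one train_on_batch step above:
# Roughly equivalent training with fit: 501 full-batch updates over the training set
model.fit(X_train, Y_train, epochs=501, batch_size=160, verbose=0)
print('test loss: ', model.evaluate(X_test, Y_test, batch_size=40))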