使用keras搭建CNN深度學(xué)習(xí)網(wǎng)絡(luò),使用mnist數(shù)據(jù)集。
一、導(dǎo)入所需要的包
##導(dǎo)入所需要的包
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Convolution2D, MaxPooling2D, Flatten
from keras.optimizers import Adam
二、載入數(shù)據(jù)集
mnist.load載入數(shù)據(jù)集,并且將x_train,x_test進(jìn)行形狀處理, y_train,y_test進(jìn)行獨(dú)熱編碼處理。
# Load the MNIST dataset (downloaded on first use).
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Add a trailing channel axis: (60000, 28, 28) -> (60000, 28, 28, 1).
# -1 lets numpy infer the sample count; dividing by 255.0 scales the
# pixel values into [0, 1].
x_train = x_train.reshape((-1, 28, 28, 1)) / 255.0
x_test = x_test.reshape((-1, 28, 28, 1)) / 255.0

# One-hot encode the labels, e.g. digit 5 -> [0,0,0,0,0,1,0,0,0,0].
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)
三抗悍、開始建模
# Assemble the network layer by layer.
model = Sequential()

# Conv block 1: 32 filters of size 5x5, stride 1; 'same' padding keeps
# the 28x28 spatial size; ReLU activation.
model.add(Convolution2D(filters=32,
                        kernel_size=5,
                        strides=1,
                        padding='same',
                        activation='relu',
                        input_shape=(28, 28, 1)))
# Pool 1: 2x2 max pooling with stride 2 -> 14x14 feature maps.
model.add(MaxPooling2D(pool_size=2, strides=2, padding='same'))

# Conv block 2: 64 filters of size 5x5.
model.add(Convolution2D(filters=64, kernel_size=5, strides=1,
                        padding='same', activation='relu'))
# Pool 2 -> 7x7 feature maps.
model.add(MaxPooling2D(pool_size=2, strides=2, padding='same'))

# Flatten the pooled feature maps into a vector for the dense head.
model.add(Flatten())
# Fully connected layer, with dropout for regularisation.
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
# Output layer: one softmax probability per digit class.
model.add(Dense(10, activation='softmax'))

# Compile with Adam; categorical cross-entropy converges faster than
# MSE for one-hot classification targets.
adam = Adam(lr=0.004)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
四、模型訓(xùn)練和評估
# Train for 10 epochs on mini-batches of 64 images.
model.fit(x_train, y_train, batch_size=64, epochs=10)

# Evaluate on the held-out test set and report the result.
# (The original assigned loss/acc but never displayed them.)
loss, acc = model.evaluate(x_test, y_test)
print('test loss:', loss)
print('test accuracy:', acc)
附:完整代碼
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Convolution2D, MaxPooling2D, Flatten
from keras.optimizers import Adam


def load_data():
    """Load MNIST and prepare it for training.

    Returns (x_train, y_train, x_test, y_test) where the images are
    reshaped to (N, 28, 28, 1) and scaled into [0, 1], and the labels
    are one-hot encoded over the 10 digit classes.
    """
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # -1 lets numpy infer the sample count; /255.0 normalises pixels.
    x_train = x_train.reshape(-1, 28, 28, 1) / 255.0
    x_test = x_test.reshape(-1, 28, 28, 1) / 255.0
    y_train = np_utils.to_categorical(y_train, num_classes=10)
    y_test = np_utils.to_categorical(y_test, num_classes=10)
    return x_train, y_train, x_test, y_test


def build_model():
    """Build and compile the CNN.

    Architecture: two conv(5x5)/max-pool(2x2) blocks (32 then 64
    filters), then Flatten -> Dense(1024, relu) -> Dropout(0.5) ->
    Dense(10, softmax).
    """
    model = Sequential()
    model.add(Convolution2D(input_shape=(28, 28, 1), filters=32,
                            kernel_size=5, strides=1, padding='same',
                            activation='relu'))
    model.add(MaxPooling2D(pool_size=2, strides=2, padding='same'))
    model.add(Convolution2D(filters=64, kernel_size=5, strides=1,
                            padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=2, strides=2, padding='same'))
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    # Cross-entropy converges faster than MSE for one-hot targets.
    model.compile(optimizer=Adam(lr=0.004),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model


def main():
    """Train the CNN on MNIST and report test-set loss/accuracy."""
    x_train, y_train, x_test, y_test = load_data()
    model = build_model()
    model.fit(x_train, y_train, batch_size=64, epochs=10)
    loss, acc = model.evaluate(x_test, y_test)
    print('test loss:', loss)
    print('test accuracy:', acc)


if __name__ == '__main__':
    main()