A neural network model is trained on the MNIST dataset. The training data consists of 28*28 images of handwritten digits, so the input layer contains 784 = 28*28 neurons. The input pixels are grayscale values: 0.0 represents white, 1.0 represents black, and values in between represent gradually darkening shades of gray.
Algorithm
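The network is a fully connected sigmoid network trained with the quadratic cost C = ?‖a^L − y‖?. For reference, here is a sketch of the standard backpropagation equations that the code in network.py below implements (σ is the sigmoid, ⊙ the elementwise product, L the index of the output layer, m the mini-batch size, α the learning rate):

z^{l} = w^{l} a^{l-1} + b^{l}, \qquad a^{l} = \sigma(z^{l})

\delta^{L} = (a^{L} - y) \odot \sigma'(z^{L})

\delta^{l} = \big( (w^{l+1})^{T} \delta^{l+1} \big) \odot \sigma'(z^{l})

\frac{\partial C}{\partial w^{l}} = \delta^{l} (a^{l-1})^{T}, \qquad \frac{\partial C}{\partial b^{l}} = \delta^{l}

w^{l} \leftarrow w^{l} - \frac{\alpha}{m} \sum_{x} \frac{\partial C_{x}}{\partial w^{l}}, \qquad b^{l} \leftarrow b^{l} - \frac{\alpha}{m} \sum_{x} \frac{\partial C_{x}}{\partial b^{l}}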
Code
mnist_loader.py: load the data
import numpy as np
import pickle
import gzip

def load_data():
    f = gzip.open('data/mnist.pkl.gz', 'rb')
    training_data, validation_data, test_data = pickle.load(f, encoding="latin1")
    f.close()
    return (training_data, validation_data, test_data)
def load_data_wrapper():
    tr_d, va_d, te_d = load_data()
    # tr_d[0]: inputs x, each a flat array of 784 pixel values
    # tr_d[1]: labels y, digits 0-9
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = zip(training_inputs, training_results)
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = zip(validation_inputs, va_d[1])
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = zip(test_inputs, te_d[1])
    return (training_data, validation_data, test_data)

def vectorized_result(j):
    """Turn a digit j into a 10-dimensional one-hot column vector."""
    v = np.zeros((10, 1))
    v[j] = 1.0
    return v
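As a quick sanity check, the loader's output can be inspected as below (a minimal sketch, assuming data/mnist.pkl.gz is present; this is the pickled MNIST split used by Nielsen's book code, with 50,000/10,000/10,000 examples):

import mnist_loader

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
training_data = list(training_data)  # the zips are single-use iterators

x, y = training_data[0]
print(len(training_data))  # 50000 training samples
print(x.shape)             # (784, 1): column vector of grayscale pixels in [0.0, 1.0]
print(y.shape)             # (10, 1): one-hot label, e.g. digit 5 -> v with v[5] = 1.0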
network.py: the algorithm, including mini-batch gradient descent and backpropagation
import numpy as np
import random

class Network(object):
    def __init__(self, sizes):
        """Initialize weights and biases.
        :param sizes: number of neurons in each layer, as a list
        weights: one weight matrix per layer, of shape (next layer, previous layer)
        biases: one bias column vector per layer
        """
        self.sizes = sizes
        self.num_layers = len(sizes)
        # Plain lists are used here: the matrices have different shapes, so
        # wrapping them in np.array would create a ragged array and fail on
        # recent versions of NumPy.
        self.weights = [np.random.randn(x, y) for x, y in zip(sizes[1:], sizes[:-1])]
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
    def feedforward(self, a):
        """Return the network's output for an input sample a."""
        for w, b in zip(self.weights, self.biases):
            a = sigmoid(np.dot(w, a) + b)
        return a
    def gradient_descent(self, training_data, epochs, mini_batch_size, alpha, test_data=None):
        """Mini-batch gradient descent: parameters are updated once per mini-batch.
        :param training_data: training samples, each a pair (x, y), passed as a zip
        :param epochs: number of passes over the training data
        :param mini_batch_size: number of samples per mini-batch
        :param alpha: learning rate
        :param test_data: test data, evaluated after each epoch if given
        """
        training_data = list(training_data)
        n = len(training_data)
        if test_data:
            test_data = list(test_data)
            n_test = len(test_data)
        for i in range(epochs):
            random.shuffle(training_data)
            mini_batches = [training_data[k:k+mini_batch_size] for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                ws_gradient = [np.zeros(w.shape) for w in self.weights]
                bs_gradient = [np.zeros(b.shape) for b in self.biases]
                for x, y in mini_batch:
                    activations, zs = self.forwardprop(x)  # forward pass
                    delta = self.cost_deviation(activations[-1], zs[-1], y)  # output-layer error
                    ws_derivative, bs_derivative = self.backprop(activations, zs, delta)  # backward pass: partial derivatives of the cost w.r.t. w and b
                    ws_gradient = [gw + dw for gw, dw in zip(ws_gradient, ws_derivative)]
                    bs_gradient = [gb + db for gb, db in zip(bs_gradient, bs_derivative)]
                self.weights = [w - alpha / len(mini_batch) * gw for w, gw in zip(self.weights, ws_gradient)]
                self.biases = [b - alpha / len(mini_batch) * gb for b, gb in zip(self.biases, bs_gradient)]
            if test_data:
                print("Epoch {} : {} / {}".format(i, self.evaluate(test_data), n_test))  # correct predictions / total test samples
            else:
                print("Epoch {} complete".format(i))
    def forwardprop(self, x):
        """Forward pass: record the activations and weighted inputs for every layer."""
        activation = x
        activations = [x]  # activations, layer by layer
        zs = []  # weighted inputs z = w.a + b, layer by layer
        for w, b in zip(self.weights, self.biases):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        return (activations, zs)
    def cost_deviation(self, output, z, y):
        """Output-layer error for the quadratic cost: delta = (a - y) * sigmoid'(z)."""
        return (output - y) * sigmoid_derivative(z)
    def backprop(self, activations, zs, delta):
        """Backward pass: propagate the error and compute the partial derivatives."""
        ws_derivative = [np.zeros(w.shape) for w in self.weights]
        bs_derivative = [np.zeros(b.shape) for b in self.biases]
        ws_derivative[-1] = np.dot(delta, activations[-2].transpose())
        bs_derivative[-1] = delta
        for l in range(2, self.num_layers):
            z = zs[-l]
            delta = np.dot((self.weights[-l+1]).transpose(), delta) * sigmoid_derivative(z)
            ws_derivative[-l] = np.dot(delta, activations[-l-1].transpose())
            bs_derivative[-l] = delta
        return (ws_derivative, bs_derivative)
    def evaluate(self, test_data):
        """Return the number of test samples for which the predicted digit
        (the index of the strongest output activation) equals the label."""
        test_results = [(np.argmax(self.feedforward(x)), y) for (x, y) in test_data]
        return sum(int(output == y) for (output, y) in test_results)
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_derivative(z):
    """Derivative of the sigmoid function."""
    return sigmoid(z) * (1 - sigmoid(z))
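Backpropagation is easy to get subtly wrong, so a finite-difference comparison is a useful companion to network.py (a minimal sketch, not part of the original code; it assumes network.py is importable and checks a single weight on a tiny random network):

import numpy as np
import network

np.random.seed(0)
net = network.Network([4, 3, 2])
x = np.random.randn(4, 1)
y = np.zeros((2, 1))
y[0] = 1.0

def quadratic_cost(n):
    a = n.feedforward(x)
    return 0.5 * float(np.sum((a - y) ** 2))

# Analytic derivative from backprop for the single sample (x, y)
activations, zs = net.forwardprop(x)
delta = net.cost_deviation(activations[-1], zs[-1], y)
ws_derivative, bs_derivative = net.backprop(activations, zs, delta)

# Central finite difference for one weight
eps = 1e-6
net.weights[0][0, 0] += eps
c_plus = quadratic_cost(net)
net.weights[0][0, 0] -= 2 * eps
c_minus = quadratic_cost(net)
net.weights[0][0, 0] += eps  # restore the original weight

numeric = (c_plus - c_minus) / (2 * eps)
print(numeric, ws_derivative[0][0, 0])  # the two values should agree closely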
run.py: entry point; trains a three-layer neural network (one input layer, one hidden layer, one output layer)
import mnist_loader
import network

if __name__ == '__main__':
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network.Network([784, 30, 10])  # 784 = 28*28 inputs, 30 hidden neurons, 10 outputs
    net.gradient_descent(training_data, 30, 10, 3.0, test_data=test_data)
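Run the script from the directory that contains data/mnist.pkl.gz, since load_data uses a relative path. With 30 hidden neurons, 30 epochs, mini-batches of 10 samples and a learning rate of 3.0, this configuration (the classic example from Michael Nielsen's Neural Networks and Deep Learning) typically reaches a test accuracy of roughly 95%.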
More content: GitHub personal blog
Note: this article was originally published at https://cnyangkui.github.io/2018/10/07/ML-NeuralNetwork/