Preface
These study notes are based on the study notes of 何寬.
The materials referenced in this post come from 何寬's Baidu Cloud share.
import numpy as np
import matplotlib.pyplot as plt
import h5py
from lr_utils import load_dataset
- numpy: the fundamental package for scientific computing with Python.
- h5py: a common package for interacting with datasets stored in H5 (HDF5) files.
- matplotlib: a well-known library for plotting charts in Python.
- lr_utils: a small helper library included in this post's materials package that loads the dataset.
lr_utils.py
import numpy as np
import h5py
def load_dataset():
    train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File('datasets/test_catvnoncat.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
Let me explain the values returned by load_dataset() above:
train_set_x_orig: the image data of the training set (this training set has 209 images of 64x64).
train_set_y_orig: the labels of the training-set images (0 | 1; 0 means non-cat, 1 means cat).
test_set_x_orig: the image data of the test set (this test set has 50 images of 64x64).
test_set_y_orig: the labels of the test-set images (0 | 1; 0 means non-cat, 1 means cat).
classes: two strings stored as bytes: [b'non-cat' b'cat'].
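If you want to peek inside the .h5 files yourself, here is a minimal sketch using h5py directly (assuming the same datasets/ paths that lr_utils uses; the shapes in the comments follow from the description above):

with h5py.File('datasets/train_catvnoncat.h5', 'r') as f:
    print(list(f.keys()))          # names of the datasets stored in the file
    print(f['train_set_x'].shape)  # (209, 64, 64, 3)
    print(f['train_set_y'].shape)  # (209,) before the reshape in load_dataset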
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
index = 25
plt.imshow(train_set_x_orig[index])
# print('train_set_y = ' +str(train_set_y))
<matplotlib.image.AxesImage at 0x7fe8acf15160>
#Print the label of the current training example
#np.squeeze strips the extra dimension: before squeezing, train_set_y[:,index] is [1]; after np.squeeze(train_set_y[:,index]) it is 1
#print("[with np.squeeze: " + str(np.squeeze(train_set_y[:,index])) + ", without np.squeeze: " + str(train_set_y[:,index]) + "]")
#Only the squeezed value can be used to index classes and decode the result
print("y=" + str(train_set_y[:,index]) + ", it's a '" + classes[np.squeeze(train_set_y[:,index])].decode("utf-8") + "' picture")
y=[1], it's a 'cat' picture
m_train = train_set_y.shape[1]
m_test = test_set_y.shape[1]
num_px = train_set_x_orig.shape[1]
print ("訓(xùn)練集的數(shù)量: m_train = " + str(m_train))
print ("測試集的數(shù)量 : m_test = " + str(m_test))
print ("每張圖片的寬/高 : num_px = " + str(num_px))
print ("每張圖片的大小 : (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("訓(xùn)練集_圖片的維數(shù) : " + str(train_set_x_orig.shape))
print ("訓(xùn)練集_標(biāo)簽的維數(shù) : " + str(train_set_y.shape))
print ("測試集_圖片的維數(shù): " + str(test_set_x_orig.shape))
print ("測試集_標(biāo)簽的維數(shù): " + str(test_set_y.shape))
訓(xùn)練集的數(shù)量: m_train = 209
測試集的數(shù)量 : m_test = 50
每張圖片的寬/高 : num_px = 64
每張圖片的大小 : (64, 64, 3)
訓(xùn)練集_圖片的維數(shù) : (209, 64, 64, 3)
訓(xùn)練集_標(biāo)簽的維數(shù) : (1, 209)
測試集_圖片的維數(shù): (50, 64, 64, 3)
測試集_標(biāo)簽的維數(shù): (1, 50)
For convenience, we now reshape each numpy array of shape (64, 64, 3) into an array of shape (64 x 64 x 3, 1). We multiply by 3 because each image is made of 64x64 pixels and each pixel has three (R, G, B) channels. After this, our training and test datasets are numpy arrays in which each column represents one flattened image, so there should be m_train and m_test columns respectively.
When you want to flatten a matrix X of shape (a, b, c, d) into a matrix X_flatten of shape (b * c * d, a), you can use the following code:
# e.g: X_flatten = X.reshape(X.shape[0], -1).T
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
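A quick self-check of the flatten-and-transpose trick on dummy data (my own illustration, not part of the assignment):

X = np.random.rand(4, 2, 2, 3)            # 4 tiny "images" of shape (2, 2, 3)
X_flatten = X.reshape(X.shape[0], -1).T   # shape (2*2*3, 4) = (12, 4)
print(X_flatten.shape)                    # (12, 4)
print(np.allclose(X_flatten[:, 1], X[1].ravel()))  # True: column i is image i unrolled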
print ("訓(xùn)練集降維最后的維度: " + str(train_set_x_flatten.shape))
print ("訓(xùn)練集_標(biāo)簽的維數(shù) : " + str(train_set_y.shape))
print ("測試集降維之后的維度: " + str(test_set_x_flatten.shape))
print ("測試集_標(biāo)簽的維數(shù) : " + str(test_set_y.shape))
訓(xùn)練集降維最后的維度: (12288, 209)
訓(xùn)練集_標(biāo)簽的維數(shù) : (1, 209)
測試集降維之后的維度: (12288, 50)
測試集_標(biāo)簽的維數(shù) : (1, 50)
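Finally, we standardize the dataset: since every pixel value lies between 0 and 255, dividing by 255 puts every feature in [0, 1], which helps gradient descent converge.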
train_set_x = train_set_x_flatten / 255
test_set_x = test_set_x_flatten / 255
The sigmoid function
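It implements the activation

$$\sigma(z) = \frac{1}{1 + e^{-z}}$$

which squashes any real number into (0, 1), so the output can be read as a probability.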
def sigmoid(z):
    s = 1 / (1 + np.exp(-z))
    return s
#Test sigmoid()
print("==================== Test sigmoid ====================")
print("sigmoid(0) = " + str(sigmoid(0)))
print("sigmoid(9.2) = " + str(sigmoid(9.2)))
==================== Test sigmoid ====================
sigmoid(0) = 0.5
sigmoid(9.2) = 0.999898970806
Initializing w and b
def initialize_with_zeros(dim):
    w = np.zeros(shape=(dim, 1))
    b = 0
    assert(w.shape == (dim, 1))
    assert(isinstance(b, float) or isinstance(b, int))
    return (w, b)
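A side note: unlike deeper neural networks, logistic regression works fine with all-zero initial weights, because there are no hidden units whose symmetry would need breaking.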
Forward and backward propagation
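For reference, these are the formulas that propagate computes over the whole training set at once (the activation is what the comments in the full code call formula 2, and the cost corresponds to formulas 3 and 4 from the course videos):

$$A = \sigma(w^T X + b)$$

$$J = -\frac{1}{m}\sum_{i=1}^{m}\left[y^{(i)}\log a^{(i)} + (1 - y^{(i)})\log(1 - a^{(i)})\right]$$

$$\frac{\partial J}{\partial w} = \frac{1}{m}X(A - Y)^T \qquad \frac{\partial J}{\partial b} = \frac{1}{m}\sum_{i=1}^{m}(a^{(i)} - y^{(i)})$$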
def propagate(w, b, X, Y):
    m = X.shape[1]
    A = sigmoid(np.dot(w.T, X) + b)
    cost = (-1 / m) * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
    dw = (1 / m) * np.dot(X, (A - Y).T)
    db = (1 / m) * np.sum(A - Y)
    assert(dw.shape == w.shape)
    assert(db.dtype == float)
    cost = np.squeeze(cost)
    assert(cost.shape == ())
    grads = {
        'dw': dw,
        'db': db
    }
    return (grads, cost)
#Test propagate
print("==================== Test propagate ====================")
#Initialize some parameters
w, b, X, Y = np.array([[1], [2]]), 2, np.array([[1, 2], [3, 4]]), np.array([[1, 0]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
==================== Test propagate ====================
dw = [[ 0.99993216]
[ 1.99980262]]
db = 0.499935230625
cost = 6.00006477319
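As an extra sanity check (my own addition; the eps value and variable names are arbitrary), the analytic gradient from propagate can be compared against a centered finite-difference approximation of the cost:

#Perturb one weight by +/- eps and difference the resulting costs
eps = 1e-7
w0, b0 = np.array([[1.], [2.]]), 2.
X0, Y0 = np.array([[1., 2.], [3., 4.]]), np.array([[1, 0]])
grads0, _ = propagate(w0, b0, X0, Y0)
w_plus = w0.copy();  w_plus[0, 0] += eps
w_minus = w0.copy(); w_minus[0, 0] -= eps
_, cost_plus = propagate(w_plus, b0, X0, Y0)
_, cost_minus = propagate(w_minus, b0, X0, Y0)
approx_dw = (cost_plus - cost_minus) / (2 * eps)
print(abs(approx_dw - grads0["dw"][0, 0]))  # should be tiny, around 1e-9 or less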
Updating the parameters with gradient descent
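Each iteration applies the standard gradient descent update with learning rate $\alpha$:

$$w := w - \alpha \, dw \qquad b := b - \alpha \, db$$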
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
    costs = []
    for i in range(num_iterations):
        grads, cost = propagate(w, b, X, Y)
        dw = grads['dw']
        db = grads['db']
        w = w - learning_rate * dw
        b = b - learning_rate * db
        if i % 100 == 0:
            costs.append(cost)
        if print_cost and (i % 100 == 0):
            print("Iteration: %i, cost: %f" % (i, cost))
    params = {
        'w': w,
        'b': b
    }
    grads = {
        'dw': dw,
        'db': db
    }
    return (params, grads, costs)
#Test optimize
print("==================== Test optimize ====================")
w, b, X, Y = np.array([[1], [2]]), 2, np.array([[1, 2], [3, 4]]), np.array([[1, 0]])
params, grads, costs = optimize(w, b, X, Y, num_iterations=100, learning_rate=0.009, print_cost=False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
==================== Test optimize ====================
w = [[ 0.1124579 ]
[ 0.23106775]]
b = 1.55930492484
dw = [[ 0.90158428]
[ 1.76250842]]
db = 0.430462071679
def predict(w, b, X):
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)
    A = sigmoid(np.dot(w.T, X) + b)
    for i in range(A.shape[1]):
        Y_prediction[0, i] = 1 if A[0, i] > 0.5 else 0
    assert(Y_prediction.shape == (1, m))
    return Y_prediction
#Test predict
print("==================== Test predict ====================")
w, b, X, Y = np.array([[1], [2]]), 2, np.array([[1, 2], [3, 4]]), np.array([[1, 0]])
print("predictions = " + str(predict(w, b, X)))
==================== Test predict ====================
predictions = [[ 1. 1.]]
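As an aside, the elementwise loop inside predict can be replaced by a single vectorized comparison; a minimal equivalent sketch (my own variant, not part of the assignment):

def predict_vectorized(w, b, X):
    #Broadcast the 0.5 threshold across all m columns at once
    A = sigmoid(np.dot(w.reshape(X.shape[0], 1).T, X) + b)
    return (A > 0.5).astype(np.float64)

print(predict_vectorized(np.array([[1], [2]]), 2, np.array([[1, 2], [3, 4]])))  # [[ 1.  1.]]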
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    """
    Build the logistic regression model by calling the functions implemented above

    Parameters:
        X_train - numpy array, the training set, of shape (num_px * num_px * 3, m_train)
        Y_train - numpy array, the training label vector, of shape (1, m_train)
        X_test - numpy array, the test set, of shape (num_px * num_px * 3, m_test)
        Y_test - numpy array, the test label vector, of shape (1, m_test)
        num_iterations - hyperparameter: the number of iterations used to optimize the parameters
        learning_rate - hyperparameter: the learning rate used in the update rule of optimize()
        print_cost - set to True to print the cost every 100 iterations

    Returns:
        d - a dictionary containing information about the model.
    """
    w, b = initialize_with_zeros(X_train.shape[0])

    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)

    #Retrieve the parameters w and b from the dictionary "parameters"
    w, b = parameters["w"], parameters["b"]

    #Predict the test/training set examples
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)

    #Print the accuracy after training
    print("Training set accuracy:", format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100), "%")
    print("Test set accuracy:", format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100), "%")

    d = {
        "costs": costs,
        "Y_prediction_test": Y_prediction_test,
        "Y_prediction_train": Y_prediction_train,
        "w": w,
        "b": b,
        "learning_rate": learning_rate,
        "num_iterations": num_iterations}
    return d
print("====================測試model====================")
#這里加載的是真實(shí)的數(shù)據(jù)羊异,請參見上面的代碼部分。
costs = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
==================== Test model ====================
Iteration: 0, cost: 0.693147
Iteration: 100, cost: 0.584508
Iteration: 200, cost: 0.466949
Iteration: 300, cost: 0.376007
Iteration: 400, cost: 0.331463
Iteration: 500, cost: 0.303273
Iteration: 600, cost: 0.279880
Iteration: 700, cost: 0.260042
Iteration: 800, cost: 0.242941
Iteration: 900, cost: 0.228004
Iteration: 1000, cost: 0.214820
Iteration: 1100, cost: 0.203078
Iteration: 1200, cost: 0.192544
Iteration: 1300, cost: 0.183033
Iteration: 1400, cost: 0.174399
Iteration: 1500, cost: 0.166521
Iteration: 1600, cost: 0.159305
Iteration: 1700, cost: 0.152667
Iteration: 1800, cost: 0.146542
Iteration: 1900, cost: 0.140872
Training set accuracy: 99.04306220095694 %
Test set accuracy: 70.0 %
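Training accuracy is close to 100% while test accuracy is only 70%, so the model fits the training set far better than it generalizes; with such a small dataset and a purely linear classifier, some overfitting is to be expected.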
#Plot the cost curve
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title('Learning rate =' + str(d['learning_rate']))
plt.show()
Running this produces the following plot. You can see the cost going down, which shows that the parameters are being learned:
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
    print('learning rate is: ' + str(i))
    models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=1500, learning_rate=i, print_cost=False)
    print('\n' + '----------------------')

for i in learning_rates:
    plt.plot(np.squeeze(models[str(i)]["costs"]), label=str(models[str(i)]["learning_rate"]))

plt.ylabel('cost')
plt.xlabel('iterations')

legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
learning rate is: 0.01
Training set accuracy: 99.52153110047847 %
Test set accuracy: 68.0 %
----------------------
learning rate is: 0.001
Training set accuracy: 88.99521531100478 %
Test set accuracy: 64.0 %
----------------------
learning rate is: 0.0001
Training set accuracy: 68.42105263157895 %
Test set accuracy: 36.0 %
----------------------
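The comparison shows how sensitive the result is to the learning rate: 0.01 converges fastest here, but a rate that is too large can make the cost oscillate or even diverge, while 0.0001 barely learns anything within 1,500 iterations.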
(Figure: cost curves for the three learning rates, plotted by the code above.)
Full code
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import h5py
from lr_utils import load_dataset
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
m_train = train_set_y.shape[1]  #number of images in the training set
m_test = test_set_y.shape[1]  #number of images in the test set
num_px = train_set_x_orig.shape[1]  #width and height of each image in the training/test sets (both are 64x64)
#Now let's take a look at what we just loaded
print ("訓(xùn)練集的數(shù)量: m_train = " + str(m_train))
print ("測試集的數(shù)量 : m_test = " + str(m_test))
print ("每張圖片的寬/高 : num_px = " + str(num_px))
print ("每張圖片的大小 : (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("訓(xùn)練集_圖片的維數(shù) : " + str(train_set_x_orig.shape))
print ("訓(xùn)練集_標(biāo)簽的維數(shù) : " + str(train_set_y.shape))
print ("測試集_圖片的維數(shù): " + str(test_set_x_orig.shape))
print ("測試集_標(biāo)簽的維數(shù): " + str(test_set_y.shape))
#Flatten and transpose the training set.
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
#Flatten and transpose the test set.
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
print ("訓(xùn)練集降維最后的維度: " + str(train_set_x_flatten.shape))
print ("訓(xùn)練集_標(biāo)簽的維數(shù) : " + str(train_set_y.shape))
print ("測試集降維之后的維度: " + str(test_set_x_flatten.shape))
print ("測試集_標(biāo)簽的維數(shù) : " + str(test_set_y.shape))
train_set_x = train_set_x_flatten / 255
test_set_x = test_set_x_flatten / 255
def sigmoid(z):
    """
    Parameters:
        z - a scalar or numpy array of any size.

    Returns:
        s - sigmoid(z)
    """
    s = 1 / (1 + np.exp(-z))
    return s
def initialize_with_zeros(dim):
    """
    This function creates a zero vector of shape (dim, 1) for w and initializes b to 0.

    Parameters:
        dim - the size of the w vector we want (or, in this case, the number of parameters)

    Returns:
        w - an initialized vector of shape (dim, 1).
        b - an initialized scalar (corresponding to the bias)
    """
    w = np.zeros(shape=(dim, 1))
    b = 0
    #Use asserts to make sure the data is what we expect
    assert(w.shape == (dim, 1))  # w has shape (dim, 1)
    assert(isinstance(b, float) or isinstance(b, int))  # b is a float or an int
    return (w, b)
def propagate(w, b, X, Y):
    """
    Implement the cost function and its gradient for forward and backward propagation.

    Parameters:
        w - weights, an array of shape (num_px * num_px * 3, 1)
        b - bias, a scalar
        X - data matrix of shape (num_px * num_px * 3, number of examples)
        Y - the true "label" vector (0 if non-cat, 1 if cat) of shape (1, number of examples)

    Returns:
        cost - the negative log-likelihood cost of logistic regression
        dw - the gradient of the loss with respect to w, hence the same shape as w
        db - the gradient of the loss with respect to b, hence the same shape as b
    """
    m = X.shape[1]

    #Forward propagation
    A = sigmoid(np.dot(w.T, X) + b)  # compute the activations; see formula 2.
    cost = (-1 / m) * np.sum(Y * np.log(A) + (1 - Y) * (np.log(1 - A)))  # compute the cost; see formulas 3 and 4.

    #Backward propagation
    dw = (1 / m) * np.dot(X, (A - Y).T)  # see the derivative formulas in the videos.
    db = (1 / m) * np.sum(A - Y)  # see the derivative formulas in the videos.

    #Use asserts to make sure the data is correct
    assert(dw.shape == w.shape)
    assert(db.dtype == float)
    cost = np.squeeze(cost)
    assert(cost.shape == ())

    #Create a dictionary to store dw and db.
    grads = {
        "dw": dw,
        "db": db
    }
    return (grads, cost)
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
    """
    This function optimizes w and b by running gradient descent

    Parameters:
        w - weights, an array of shape (num_px * num_px * 3, 1)
        b - bias, a scalar
        X - an array of shape (num_px * num_px * 3, number of training examples).
        Y - the true "label" vector (0 if non-cat, 1 if cat) of shape (1, number of training examples)
        num_iterations - the number of iterations of the optimization loop
        learning_rate - the learning rate of the gradient descent update rule
        print_cost - print the cost once every 100 steps

    Returns:
        params - a dictionary containing the weights w and the bias b
        grads - a dictionary containing the gradients of the weights and the bias with respect to the cost function
        costs - the list of all costs computed during the optimization; it will be used to plot the learning curve.

    Hints:
        We need to write down two steps and iterate over them:
            1) Compute the cost and gradients for the current parameters, using propagate().
            2) Update the parameters with the gradient descent rule for w and b.
    """
    costs = []
    for i in range(num_iterations):
        grads, cost = propagate(w, b, X, Y)
        dw = grads["dw"]
        db = grads["db"]
        w = w - learning_rate * dw
        b = b - learning_rate * db
        #Record the cost
        if i % 100 == 0:
            costs.append(cost)
        #Print the cost
        if print_cost and (i % 100 == 0):
            print("Iteration: %i, cost: %f" % (i, cost))
    params = {
        "w": w,
        "b": b}
    grads = {
        "dw": dw,
        "db": db}
    return (params, grads, costs)
def predict(w, b, X):
    """
    Predict whether the label is 0 or 1 using the learned logistic regression parameters (w, b)

    Parameters:
        w - weights, an array of shape (num_px * num_px * 3, 1)
        b - bias, a scalar
        X - data of shape (num_px * num_px * 3, number of examples)

    Returns:
        Y_prediction - a numpy array (vector) containing all predictions (0 | 1) for the examples in X
    """
    m = X.shape[1]  # number of images
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)

    #Compute the probability that a cat appears in the picture
    A = sigmoid(np.dot(w.T, X) + b)
    for i in range(A.shape[1]):
        #Convert the probability A[0, i] into an actual prediction p[0, i]
        Y_prediction[0, i] = 1 if A[0, i] > 0.5 else 0
    #Use an assert
    assert(Y_prediction.shape == (1, m))
    return Y_prediction
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    """
    Build the logistic regression model by calling the functions implemented above

    Parameters:
        X_train - numpy array, the training set, of shape (num_px * num_px * 3, m_train)
        Y_train - numpy array, the training label vector, of shape (1, m_train)
        X_test - numpy array, the test set, of shape (num_px * num_px * 3, m_test)
        Y_test - numpy array, the test label vector, of shape (1, m_test)
        num_iterations - hyperparameter: the number of iterations used to optimize the parameters
        learning_rate - hyperparameter: the learning rate used in the update rule of optimize()
        print_cost - set to True to print the cost every 100 iterations

    Returns:
        d - a dictionary containing information about the model.
    """
    w, b = initialize_with_zeros(X_train.shape[0])

    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)

    #Retrieve the parameters w and b from the dictionary "parameters"
    w, b = parameters["w"], parameters["b"]

    #Predict the test/training set examples
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)

    #Print the accuracy after training
    print("Training set accuracy:", format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100), "%")
    print("Test set accuracy:", format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100), "%")

    d = {
        "costs": costs,
        "Y_prediction_test": Y_prediction_test,
        "Y_prediction_train": Y_prediction_train,
        "w": w,
        "b": b,
        "learning_rate": learning_rate,
        "num_iterations": num_iterations}
    return d
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
#Plot the cost curve
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
Number of training examples: m_train = 209
Number of test examples: m_test = 50
Width/height of each image: num_px = 64
Size of each image: (64, 64, 3)
Shape of training set images: (209, 64, 64, 3)
Shape of training set labels: (1, 209)
Shape of test set images: (50, 64, 64, 3)
Shape of test set labels: (1, 50)
Shape of flattened training set: (12288, 209)
Shape of training set labels: (1, 209)
Shape of flattened test set: (12288, 50)
Shape of test set labels: (1, 50)
Iteration: 0, cost: 0.693147
Iteration: 100, cost: 0.584508
Iteration: 200, cost: 0.466949
Iteration: 300, cost: 0.376007
Iteration: 400, cost: 0.331463
Iteration: 500, cost: 0.303273
Iteration: 600, cost: 0.279880
Iteration: 700, cost: 0.260042
Iteration: 800, cost: 0.242941
Iteration: 900, cost: 0.228004
Iteration: 1000, cost: 0.214820
Iteration: 1100, cost: 0.203078
Iteration: 1200, cost: 0.192544
Iteration: 1300, cost: 0.183033
Iteration: 1400, cost: 0.174399
Iteration: 1500, cost: 0.166521
Iteration: 1600, cost: 0.159305
Iteration: 1700, cost: 0.152667
Iteration: 1800, cost: 0.146542
Iteration: 1900, cost: 0.140872
Training set accuracy: 99.04306220095694 %
Test set accuracy: 70.0 %
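As a final experiment, you can run the trained model on a picture of your own. A minimal sketch (it assumes Pillow is installed and that "my_image.jpg" is a stand-in for your own file; neither is part of the original notes):

from PIL import Image

#Load the image, resize it to 64x64, and flatten it the same way as the training data
img = Image.open("my_image.jpg").resize((num_px, num_px))
x = np.asarray(img, dtype=np.float64)[:, :, :3].reshape(num_px * num_px * 3, 1) / 255
my_prediction = predict(d["w"], d["b"], x)
print("y = " + str(int(np.squeeze(my_prediction))) + " (1 = cat, 0 = non-cat)")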