Data
We will build a logistic regression model to predict whether a student gets admitted to university. Suppose you are the administrator of a university department and want to determine each applicant's chance of admission based on the results of two exams. You have historical data from previous applicants, which you can use as a training set for logistic regression. For each training example you have the applicant's scores on the two exams and the admission decision. To do this, we will build a classification model that estimates the probability of admission from the exam scores. Import the data and take a look.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
path = 'data' + os.sep + 'LogiReg_data.txt'  # os.sep automatically uses the correct path separator for your platform
pdData = pd.read_csv(path, header=None, names=['Exam 1', 'Exam 2', 'Admitted'])
pdData.head()
pdData.shape
positive = pdData[pdData['Admitted'] == 1]  # positive examples (admitted)
negative = pdData[pdData['Admitted'] == 0]  # negative examples (not admitted)
fig, ax = plt.subplots(figsize=(10,5))
ax.scatter(positive['Exam 1'], positive['Exam 2'], s=30, c='b', marker='o', label='Admitted')
ax.scatter(negative['Exam 1'], negative['Exam 2'], s=30, c='r', marker='x', label='Not Admitted')
ax.legend()
ax.set_xlabel('Exam 1 Score')
ax.set_ylabel('Exam 2 Score')
Using logistic regression
Algorithm implementation steps
First, build the sigmoid function
def sigmoid(z):
return 1 / (1 + np.exp(-z))
nums = np.arange(-10, 10, step=1)
fig, ax = plt.subplots(figsize=(12,4))
ax.plot(nums, sigmoid(nums), 'r')
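In standard notation, the function defined and plotted above is the logistic (sigmoid) function, which maps any real number into the interval (0, 1):

$$ g(z) = \frac{1}{1 + e^{-z}} $$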
- model: return the predicted probability
def model(X, theta):  # prediction function
    return sigmoid(np.dot(X, theta.T))  # matrix multiplication X · theta^T
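Written out, the model above is the usual logistic regression hypothesis, i.e. the estimated probability of admission given the features x and parameters θ:

$$ h_\theta(x) = g(\theta^{T} x) = \frac{1}{1 + e^{-\theta^{T} x}} $$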
pdData.insert(0, 'Ones', 1)  # add a new column of ones (the intercept term)
# set X (training data) and y (target variable)
orig_data = pdData.values  # convert the DataFrame to a NumPy array (as_matrix() was removed in newer pandas)
cols = orig_data.shape[1]
X = orig_data[:,0:cols-1]
y = orig_data[:,cols-1:cols]
# convert to NumPy arrays and set up the parameter array
theta = np.zeros([1, 3])  # three theta parameters, initialized to zero
Take a look at what we have set up
X[:5]
y[:5]
theta
X.shape, y.shape, theta.shape
Construct the cost function and compute the average loss
def cost(X, y, theta):
left = np.multiply(-y, np.log(model(X, theta)))
right = np.multiply(1 - y, np.log(1 - model(X, theta)))
return np.sum(left - right) / (len(X))
cost(X, y, theta)  # average loss value
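The cost computed above is the average cross-entropy loss; in the usual notation, with m the number of training samples:

$$ J(\theta) = -\frac{1}{m} \sum_{i=1}^{m} \left[ y^{(i)} \log h_\theta(x^{(i)}) + \left(1 - y^{(i)}\right) \log\left(1 - h_\theta(x^{(i)})\right) \right] $$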
Compute the gradient
Take the partial derivative with respect to each theta
def gradient(X, y, theta):
    grad = np.zeros(theta.shape)  # one gradient entry per theta parameter
    error = (model(X, theta) - y).ravel()
    for j in range(len(theta.ravel())):  # for each parameter
        term = np.multiply(error, X[:, j])
        grad[0, j] = np.sum(term) / len(X)
    return grad
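For each parameter θ_j, the loop above computes the standard partial derivative of the cross-entropy cost:

$$ \frac{\partial J}{\partial \theta_j} = \frac{1}{m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right) x_j^{(i)} $$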
Compare three different gradient descent methods
STOP_ITER = 0
STOP_COST = 1
STOP_GRAD = 2
def stopCriterion(type, value, threshold):
    # three different stopping strategies
    if type == STOP_ITER:   return value > threshold
    elif type == STOP_COST: return abs(value[-1] - value[-2]) < threshold
    elif type == STOP_GRAD: return np.linalg.norm(value) < threshold
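A quick illustration of what each strategy checks (the values below are made up for demonstration, not taken from an actual run):
stopCriterion(STOP_ITER, 5001, 5000)                            # True: iteration count has exceeded the threshold
stopCriterion(STOP_COST, [0.6300000, 0.6299999], 1e-6)          # True: the last change in cost is below the threshold
stopCriterion(STOP_GRAD, np.array([[0.01, 0.02, 0.01]]), 0.05)  # True: the gradient norm is below the threshold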
import numpy.random
# shuffle all of the data so the model generalizes better
def shuffleData(data):
    np.random.shuffle(data)  # shuffle the rows in place
    cols = data.shape[1]
    X = data[:, 0:cols-1]
    y = data[:, cols-1:]
    return X, y
# time each run to see how long the different strategies take
import time
def descent(data, theta, batchSize, stopType, thresh, alpha):
    # solve by gradient descent
    init_time = time.time()
    i = 0  # iteration counter
    k = 0  # batch index
    X, y = shuffleData(data)
    grad = np.zeros(theta.shape)  # current gradient
    costs = [cost(X, y, theta)]   # history of loss values
    while True:
        grad = gradient(X[k:k+batchSize], y[k:k+batchSize], theta)
        k += batchSize  # move on to the next batch of batchSize samples
        if k >= n:      # n is the total number of samples (set globally below)
            k = 0
            X, y = shuffleData(data)  # reshuffle
        theta = theta - alpha*grad        # parameter update
        costs.append(cost(X, y, theta))   # compute the new loss
        i += 1
        # decide when to stop
        if stopType == STOP_ITER:   value = i
        elif stopType == STOP_COST: value = costs
        elif stopType == STOP_GRAD: value = grad
        if stopCriterion(stopType, value, thresh): break
    return theta, i-1, costs, grad, time.time() - init_time
# choose the gradient descent variant and stopping strategy from the arguments, run it, and plot the result
def runExpe(data, theta, batchSize, stopType, thresh, alpha):
theta, iter, costs, grad, dur = descent(data, theta, batchSize, stopType, thresh, alpha)
name = "Original" if (data[:,1]>2).sum() > 1 else "Scaled"
name += " data - learning rate: {} - ".format(alpha)
if batchSize==n: strDescType = "Gradient"
elif batchSize==1: strDescType = "Stochastic"
else: strDescType = "Mini-batch ({})".format(batchSize)
name += strDescType + " descent - Stop: "
if stopType == STOP_ITER: strStop = "{} iterations".format(thresh)
elif stopType == STOP_COST: strStop = "costs change < {}".format(thresh)
else: strStop = "gradient norm < {}".format(thresh)
name += strStop
print ("***{}\nTheta: {} - Iter: {} - Last cost: {:03.2f} - Duration: {:03.2f}s".format(
name, theta, iter, costs[-1], dur))
fig, ax = plt.subplots(figsize=(12,4))
ax.plot(np.arange(len(costs)), costs, 'r')
ax.set_xlabel('Iterations')
ax.set_ylabel('Cost')
ax.set_title(name.upper() + ' - Error vs. Iteration')
return theta
- Compare different stopping strategies
- Stop after a fixed number of iterations
# the gradient descent here is based on all of the samples (full batch)
n = 100  # total number of training samples
runExpe(orig_data, theta, n, STOP_ITER, thresh=5000, alpha=0.000001)  # 5000 iterations, learning rate 0.000001
- Stop when the change in cost falls below a threshold
Set the threshold to 1e-6; this takes roughly 110,000 iterations
runExpe(orig_data, theta, n, STOP_COST, thresh=0.000001, alpha=0.001)
- Stop when the gradient norm falls below a threshold
Set the threshold to 0.05; this takes roughly 40,000 iterations
runExpe(orig_data, theta, n, STOP_GRAD, thresh=0.05, alpha=0.001)
- Compare different gradient descent methods
- Stochastic gradient descent
runExpe(orig_data, theta, 1, STOP_ITER, thresh=5000, alpha=0.001)  # one sample per iteration
Clearly very unstable; let's try lowering the learning rate.
runExpe(orig_data, theta, 1, STOP_ITER, thresh=15000, alpha=0.000002)
The plot above shows that stochastic descent is fast, but it is unstable and needs a very small learning rate.
- Mini-batch gradient descent
runExpe(orig_data, theta, 16, STOP_ITER, thresh=15000, alpha=0.001)
The fluctuation is still fairly large, so let's try standardizing the data: for each feature (column-wise), subtract the column mean and divide by its standard deviation, so that every column ends up centered around 0 with variance 1 (a short sketch of the equivalent manual computation follows the scaling code below).
from sklearn import preprocessing as pp
scaled_data = orig_data.copy()
scaled_data[:, 1:3] = pp.scale(orig_data[:, 1:3])
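For reference, a minimal sketch of the equivalent manual computation (assuming column-wise mean and standard deviation, which is what pp.scale uses by default; manual_data, col_mean and col_std are illustrative names, not part of the original code):
manual_data = orig_data.copy()
col_mean = manual_data[:, 1:3].mean(axis=0)
col_std = manual_data[:, 1:3].std(axis=0)
manual_data[:, 1:3] = (manual_data[:, 1:3] - col_mean) / col_std  # same result as pp.scale on these columns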
runExpe(scaled_data, theta, n, STOP_ITER, thresh=5000, alpha=0.001)
Much better! With the original data the cost only got down to about 0.61, whereas here we reach 0.38. Preprocessing the data really matters.
runExpe(scaled_data, theta, n, STOP_GRAD, thresh=0.02, alpha=0.001)
theta = runExpe(scaled_data, theta, 1, STOP_GRAD, thresh=0.002/5, alpha=0.001)
Stochastic gradient descent is faster, but it needs many more iterations, so using (mini-)batches is the better fit here!
- Finally, use mini-batch descent with the batch size set to 16
runExpe(scaled_data, theta, 16, STOP_GRAD, thresh=0.002*2, alpha=0.001)
After only about 4,000 iterations the loss is around 0.21, which is a fairly good result.
- Accuracy
# apply a 0.5 decision threshold
def predict(X, theta):
return [1 if x >= 0.5 else 0 for x in model(X, theta)]
scaled_X = scaled_data[:, :3]
y = scaled_data[:, 3]
predictions = predict(scaled_X, theta)
correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y)]
accuracy = sum(map(int, correct)) / len(correct)  # fraction of predictions that are correct
print('accuracy = {0:.0f}%'.format(accuracy * 100))