Logistic regression is mainly used for classification: I have a pile of data, and I want to split it into two classes according to some feature or point of view. That is where the classification function comes in.
As an example, take a set of data points. In the end I split them into active and inactive: everything left of the black line is inactive, everything to its right is active. That black line is the fitted boundary, and it decides whether a future data point belongs with the inactive points on the left or the active points on the right.
用比較理論的語(yǔ)言就是
利用Logistic回歸進(jìn)行分類(lèi)的主要思想是,根據(jù)現(xiàn)有數(shù)據(jù)對(duì)分類(lèi)邊界線建立回歸公式飒筑,以此進(jìn)行分類(lèi)。
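For the two-feature data used below, that regression formula is a line: the boundary sits where w0·x0 + w1·x1 + w2·x2 = 0, with x0 fixed at 1 as the intercept term. Points on one side of the line get one label, points on the other side get the other; this is exactly the line the plotting code later in the post draws.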
The basic flow goes like this: we need a function whose output, no matter how x varies, stays concentrated at 0 and 1. How do we find such a function?
Enter the sigmoid function, a smooth stand-in for the unit step function.
Its formula is σ(z) = 1 / (1 + e^(−z)), and its curve is the familiar S shape, rising from 0 to 1 and passing through (0, 0.5).
Notice that the wider the range of z you look at, the more the sigmoid behaves like a step function, which is exactly the classifying behavior we want. Here z is a weighted sum of the inputs, z = w0·x0 + w1·x1 + w2·x2, so the whole problem reduces to a maximization: finding the weights w that best fit the data decides whether classification succeeds or fails.
Gradient ascent is the classic method for this kind of maximization: to climb toward a function's peak, keep stepping in the direction of its gradient.
Concretely, we can use either batch gradient ascent or stochastic gradient ascent, sketched below.
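The idea of gradient ascent itself fits in a few lines. A toy sketch (my example, not from the post): to maximize f(w) = -(w - 4)², step repeatedly in the direction of the derivative f'(w) = -2(w - 4).

w = 0.0
alpha = 0.1                  # step size
for _ in range(100):
    grad = -2 * (w - 4)      # derivative of f at the current w
    w = w + alpha * grad     # move uphill along the gradient
print(w)                     # ~4.0, the maximizer of f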
The concrete flow is shown below. The end result: with a decaying learning rate and randomized sample selection, the improved version speeds the computation up considerably.
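Both implementations below perform the same core update, just on different amounts of data per step. In matrix form it is w ← w + α · Xᵀ(y − σ(Xw)), where X is the data matrix, y the label vector, σ the sigmoid, and α the learning rate; the batch version applies it using all of X at once, the stochastic versions one row at a time.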
Batch gradient ascent, implemented in Python:
import numpy as np

def loadDataSet():
    dataMat = []
    labelMat = []
    fr = open('test.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()
        # prepend a constant 1.0 column so weights[0] acts as the intercept w0
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
        # the last column of the file holds the training label y
        labelMat.append(float(lineArr[2]))
    fr.close()
    return dataMat, labelMat

def sigmoid(inX):
    return 1.0 / (1 + np.exp(-inX))

def gradAscent(dataMatIn, classLabels):
    # convert to NumPy matrix types
    dataMatrix = np.mat(dataMatIn)
    labelMat = np.mat(classLabels).transpose()
    m, n = np.shape(dataMatrix)
    alpha = 0.0001        # learning rate
    maxCycles = 50        # number of full passes over the data
    weights = np.ones((n, 1))
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)   # predictions for all m samples at once
        error = labelMat - h                # vector of prediction errors
        # batch update: every sample contributes to every step
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights

def plotBestFit(weights):
    import matplotlib.pyplot as plt
    dataMat, labelMat = loadDataSet()
    dataArr = np.array(dataMat)
    n = np.shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i, 1]); ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1]); ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    x = np.arange(-3.0, 3.0, 0.1)
    # the boundary is the line w0*x0 + w1*x1 + w2*x2 = 0 with x0 = 1,
    # so x2 = (-w0 - w1*x1) / w2
    w0 = weights[0]
    w1 = weights[1]
    w2 = weights[2]
    y = (-w0 - w1 * x) / w2
    ax.plot(x, y)
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()

dataArr, labelMat = loadDataSet()
weights = gradAscent(dataArr, labelMat)
print(weights[1])
plotBestFit(weights.getA())    # getA() converts the weight matrix to a plain array
The result: (figure: the fitted boundary drawn over the two-class scatter.)
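With the weights in hand, classifying a new point is just thresholding the sigmoid output at 0.5. A minimal sketch reusing the sigmoid defined above (classifyVector is a name I am introducing here, not defined in this post):

def classifyVector(inX, weights):
    prob = sigmoid(np.sum(inX * weights))   # probability of the active class
    return 1.0 if prob > 0.5 else 0.0

# e.g. a point with features x1 = 1.0, x2 = 2.0 (the leading 1.0 is x0):
# classifyVector(np.array([1.0, 1.0, 2.0]), weights.getA().flatten())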
隨機(jī)梯度上升算法
def stocGradAscent0(dataMatrix, classLabels):
    m, n = np.shape(dataMatrix)
    alpha = 0.01
    weights = np.ones(n)
    for i in range(m):
        # one weight update per sample, using only that sample's error
        h = sigmoid(np.sum(dataMatrix[i] * weights))
        error = classLabels[i] - h
        weights = weights + alpha * error * dataMatrix[i]
    return weights

dataArr, labelMat = loadDataSet()
weights = stocGradAscent0(np.array(dataArr), labelMat)
print(weights[1])
plotBestFit(weights)    # weights is already a plain array here
The result: (figure: the boundary from one pass of stochastic gradient ascent.)
改進(jìn)的隨機(jī)梯度上升算法
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    m, n = np.shape(dataMatrix)
    weights = np.ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))
        for i in range(m):
            # alpha decays as training proceeds but never hits 0,
            # so late updates still nudge the weights slightly
            alpha = 4 / (1.0 + j + i) + 0.01
            # pick a random not-yet-used sample to avoid periodic oscillation
            randIndex = int(np.random.uniform(0, len(dataIndex)))
            sampleIdx = dataIndex[randIndex]
            h = sigmoid(np.sum(dataMatrix[sampleIdx] * weights))
            error = classLabels[sampleIdx] - h
            weights = weights + alpha * error * dataMatrix[sampleIdx]
            del(dataIndex[randIndex])   # remove it from this pass's pool
    return weights

dataArr, labelMat = loadDataSet()
weights = stocGradAscent1(np.array(dataArr), labelMat)
plotBestFit(weights)
The result: (figure: the boundary from the improved stochastic gradient ascent.)
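To see what the schedule alpha = 4/(1.0 + j + i) + 0.01 actually does, print a few values (a small illustration, assuming the same formula as above):

for j in [0, 1, 10, 100]:
    for i in [0, 50]:
        print(j, i, round(4 / (1.0 + j + i) + 0.01, 4))
# alpha starts at 4.01 and decays toward its 0.01 floor, so early updates
# are large while late updates stay small but never vanish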