import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets,ensemble,model_selection
Load the diabetes dataset bundled with scikit-learn
- The dataset has 442 samples
- Each sample has 10 features
- Every feature is a float, and all feature values lie between -0.2 and 0.2
- Each sample's target is an integer between 25 and 346
def load_data_regression():
    diabetes = datasets.load_diabetes()
    return model_selection.train_test_split(diabetes.data, diabetes.target, random_state=0)
The classification examples below use the Digit Dataset, the handwritten-digit dataset bundled with scikit-learn
- The dataset consists of 1797 images
- Each image is an 8x8 bitmap of a handwritten digit
def load_data_classifier():
    digits = datasets.load_digits()
    return model_selection.train_test_split(digits.data, digits.target, random_state=0)
def test_AdaboostClassifier(*data):
    X_train, X_test, Y_train, Y_test = data
    clf = ensemble.AdaBoostClassifier(learning_rate=0.1)  # the default base learner is a decision tree classifier
    clf.fit(X_train, Y_train)
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    estimators_num = len(clf.estimators_)
    X = range(1, estimators_num + 1)
    ax.plot(list(X), list(clf.staged_score(X_train, Y_train)), label="Training score")
    ax.plot(list(X), list(clf.staged_score(X_test, Y_test)), label="Testing score")
    ax.set_xlabel("estimator num")
    ax.set_ylabel("score")
    ax.legend(loc="best")
    ax.set_title("AdaBoostClassifier")
    plt.show()
X_train,X_test,Y_train,Y_test = load_data_classifier()
test_AdaboostClassifier(X_train,X_test,Y_train,Y_test)
[Figure: AdaBoostClassifier, training and testing score vs. estimator num]
Analysis of the results:
- As boosting proceeds, each round adds one new individual classifier to the ensemble. During this phase the training and testing errors of the ensemble both decrease, i.e., the corresponding accuracies rise.
- Once the number of individual classifiers reaches a certain point, the ensemble's prediction accuracy only fluctuates within a narrow band and remains fairly stable (a simple way to locate that point numerically is sketched below).
- Ensemble learning resists overfitting well here.
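The plateau described above can also be read off numerically rather than from the plot. The following is a minimal sketch, assuming the load_data_classifier split and imports defined earlier and the same default AdaBoostClassifier settings; the 1e-3 improvement threshold is an arbitrary illustrative choice.

X_train, X_test, Y_train, Y_test = load_data_classifier()
clf = ensemble.AdaBoostClassifier(learning_rate=0.1)
clf.fit(X_train, Y_train)

# staged_score yields the ensemble's score after 1, 2, ..., n estimators
test_scores = np.fromiter(clf.staged_score(X_test, Y_test), dtype=float)
# last boosting round whose test score still improved by more than the threshold
improving = np.flatnonzero(np.diff(test_scores) > 1e-3)
last_gain = improving[-1] + 2 if improving.size else 1  # +2: diff offset plus 1-based round index
print("test score stops improving noticeably after about %d estimators" % last_gain)
print("final test score: %.4f" % test_scores[-1])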
Examining the effect of different types of base classifiers
def test_AdaboostClassifier_base_classifier(*data):
    from sklearn.naive_bayes import GaussianNB  # Gaussian naive Bayes classifier
    X_train, X_test, Y_train, Y_test = data
    fig = plt.figure()
    ax = fig.add_subplot(2, 1, 1)
    ######## default base classifier #########
    clf = ensemble.AdaBoostClassifier(learning_rate=0.1)
    clf.fit(X_train, Y_train)
    estimators_num = len(clf.estimators_)
    X = range(1, estimators_num + 1)
    ax.plot(list(X), list(clf.staged_score(X_train, Y_train)), label="Training score")
    ax.plot(list(X), list(clf.staged_score(X_test, Y_test)), label="Testing score")
    ax.set_xlabel("estimator num")
    ax.set_ylabel("score")
    ax.legend(loc="best")
    ax.set_ylim(0, 1)
    ax.set_title("AdaBoostClassifier with Decision Tree")
    ######## Gaussian Naive Bayes base classifier #########
    ax = fig.add_subplot(2, 1, 2)
    clf = ensemble.AdaBoostClassifier(learning_rate=0.1, base_estimator=GaussianNB())
    clf.fit(X_train, Y_train)
    estimators_num = len(clf.estimators_)
    X = range(1, estimators_num + 1)
    ax.plot(list(X), list(clf.staged_score(X_train, Y_train)), label="Training score")
    ax.plot(list(X), list(clf.staged_score(X_test, Y_test)), label="Testing score")
    ax.set_xlabel("estimator num")
    ax.set_ylabel("score")
    ax.legend(loc="best")
    ax.set_ylim(0, 1)
    ax.set_title("AdaBoostClassifier with Gaussian Naive Bayes")
    plt.tight_layout()
    plt.show()
X_train,X_test,Y_train,Y_test = load_data_classifier()
test_AdaboostClassifier_base_classifier(X_train,X_test,Y_train,Y_test)
[Figure: AdaBoostClassifier with Decision Tree (top) and with Gaussian Naive Bayes (bottom), score vs. estimator num]
Analysis of the results:
- From the comparison, because the Gaussian naive Bayes base classifier is itself already a fairly strong classifier (a single such classifier already reaches good prediction accuracy), there is no visible phase in which the ensemble's accuracy climbs; the whole curve stays fairly flat. The sketch below makes the same point with plain scores.
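A minimal sketch, assuming the same load_data_classifier split as above, that compares a single GaussianNB with the boosted ensemble built on it; note that in newer scikit-learn versions the base_estimator parameter is named estimator.

from sklearn.naive_bayes import GaussianNB

X_train, X_test, Y_train, Y_test = load_data_classifier()

single = GaussianNB().fit(X_train, Y_train)
# base_estimator is called estimator in scikit-learn >= 1.2
boosted = ensemble.AdaBoostClassifier(learning_rate=0.1,
                                      base_estimator=GaussianNB()).fit(X_train, Y_train)

# the gap between these two numbers is small, which is why the lower panel looks flat
print("single GaussianNB  test score: %.4f" % single.score(X_test, Y_test))
print("boosted GaussianNB test score: %.4f" % boosted.score(X_test, Y_test))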
Examining the effect of the learning rate
def test_AdaboostClassifier_learning_rate(*data):
    X_train, X_test, Y_train, Y_test = data
    learning_rates = np.linspace(0.01, 1)  # evenly spaced values; 50 points by default
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    train_score = []
    test_score = []
    for learning_rate in learning_rates:
        clf = ensemble.AdaBoostClassifier(learning_rate=learning_rate, n_estimators=500)
        clf.fit(X_train, Y_train)
        train_score.append(clf.score(X_train, Y_train))
        test_score.append(clf.score(X_test, Y_test))
    ax.plot(learning_rates, train_score, label="Training score")
    ax.plot(learning_rates, test_score, label="Testing score")
    ax.set_xlabel("learning rate")
    ax.set_ylabel("score")
    ax.legend(loc="best")
    ax.set_title("AdaBoostClassifier")
    plt.show()
X_train,X_test,Y_train,Y_test = load_data_classifier()
test_AdaboostClassifier_learning_rate(X_train,X_test,Y_train,Y_test)
[Figure: AdaBoostClassifier, training and testing score vs. learning rate]
Analysis of the results:
- While the learning rate is small, both the training and testing accuracies rise slowly as the learning rate increases. Once the learning rate exceeds roughly 0.7, both drop rapidly as it grows further.
- A smaller learning rate generally requires a larger number of weak learners to reach the same ensemble quality.
- Experience suggests that a small learning rate tends to give a smaller error. A common recommendation is therefore a learning rate of at most 0.1 combined with a large n_estimators; the algorithm may stop boosting once the training error is low enough ('early stopping'), in which case the actual number of fitted individual learners is smaller than n_estimators. A quick way to check this is sketched below.
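A minimal sketch, assuming the same split as above, that compares n_estimators with the number of estimators actually fitted; whether early stopping really triggers depends on the data and the base learner.

X_train, X_test, Y_train, Y_test = load_data_classifier()

clf = ensemble.AdaBoostClassifier(learning_rate=0.1, n_estimators=500)
clf.fit(X_train, Y_train)

print("requested n_estimators:", clf.n_estimators)
print("actually fitted       :", len(clf.estimators_))  # can be smaller if boosting terminated early
print("test score            : %.4f" % clf.score(X_test, Y_test))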
Examining the effect of the algorithm parameter
def test_AdaboostClassifier_algorithm(*data):
    X_train, X_test, Y_train, Y_test = data
    algorithms = ["SAMME.R", "SAMME"]
    fig = plt.figure(figsize=(8, 8))
    learning_rates = [0.05, 0.1, 0.5, 0.9]
    for i, learning_rate in enumerate(learning_rates):
        ax = fig.add_subplot(2, 2, i + 1)
        for algorithm in algorithms:
            clf = ensemble.AdaBoostClassifier(learning_rate=learning_rate, algorithm=algorithm)
            clf.fit(X_train, Y_train)
            estimators_num = len(clf.estimators_)
            X = range(1, estimators_num + 1)
            ax.plot(list(X), list(clf.staged_score(X_train, Y_train)), label="%s: Training score" % algorithm)
            ax.plot(list(X), list(clf.staged_score(X_test, Y_test)), label="%s: Testing score" % algorithm)
        ax.set_xlabel("estimator num")
        ax.set_ylabel("score")
        ax.legend(loc="lower right")
        ax.set_title("learning rate:%f" % learning_rate)
    plt.tight_layout()
    plt.show()
X_train,X_test,Y_train,Y_test = load_data_classifier()
test_AdaboostClassifier_algorithm(X_train,X_test,Y_train,Y_test)
[Figure: SAMME vs. SAMME.R training and testing scores vs. estimator num, at learning rates 0.05, 0.1, 0.5, 0.9]
Analysis of the results:
- When the learning rate is small, SAMME.R consistently gives the better prediction performance.
- When the learning rate is large, SAMME.R still does better while the number of individual classifiers is small, but becomes worse once the number of individual decision trees grows large. In other words, with a large learning rate, SAMME.R's performance at the saturated (large-ensemble) end drops off quickly. The sketch below compares the final test scores directly.
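A minimal sketch, assuming the split above, that fits both algorithms at a small and a large learning rate with the default n_estimators and prints only the final test scores.

X_train, X_test, Y_train, Y_test = load_data_classifier()

for learning_rate in (0.05, 0.9):
    for algorithm in ("SAMME.R", "SAMME"):
        clf = ensemble.AdaBoostClassifier(learning_rate=learning_rate,
                                          algorithm=algorithm)
        clf.fit(X_train, Y_train)
        # the final score corresponds to the right-hand end of each curve in the panels above
        print("learning rate=%.2f  %-7s  test score=%.4f"
              % (learning_rate, algorithm, clf.score(X_test, Y_test)))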