Classification Model Fusion
from sklearn import datasets
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
# Voting
'''
Hard voting: each model casts one vote directly, with no weighting of the models'
outputs; the class that receives the most votes becomes the final prediction.
'''
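'''
As a quick aside (a minimal sketch, not part of the original pipeline): hard
voting is just a per-sample majority count over the base models' predicted
labels, which can be reproduced with numpy alone. The prediction arrays below
are made-up values for illustration.
'''
import numpy as np
preds = np.array([[0, 1, 1, 2],   # hypothetical predictions of model 1 on 4 samples
                  [0, 1, 2, 2],   # model 2
                  [1, 1, 2, 0]])  # model 3
# Majority vote per column (per sample): the most frequent class label wins
hard_vote = np.apply_along_axis(lambda c: np.bincount(c).argmax(), 0, preds)
print(hard_vote)  # -> [0 1 2 2]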
iris = datasets.load_iris()
x=iris.data
y=iris.target
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3)
clf1 = XGBClassifier(learning_rate=0.1, n_estimators=150, max_depth=3, min_child_weight=2, subsample=0.7,
                     colsample_bytree=0.6, objective='multi:softprob')  # iris has 3 classes, so a multiclass objective is needed
clf2 = RandomForestClassifier(n_estimators=50, max_depth=1, min_samples_split=4,
                              min_samples_leaf=63, oob_score=True)
clf3 = SVC(C=0.1)
# Hard voting
eclf = VotingClassifier(estimators=[('xgb', clf1), ('rf', clf2), ('svc', clf3)], voting='hard')
for clf, label in zip([clf1, clf2, clf3, eclf], ['XGBoost', 'Random Forest', 'SVM', 'Ensemble']):
    scores = cross_val_score(clf, x, y, cv=5, scoring='accuracy')
    print("Accuracy: %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))
'''
Soft voting: same principle as hard voting, but with support for per-model weights,
so more reliable models can be given more influence. Every base model must expose
predict_proba; the weighted average of the predicted probabilities decides the class.
'''
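'''
Likewise a minimal sketch (illustrative numbers, not from the original): soft
voting averages the models' predict_proba outputs with the given weights and
takes the argmax of the weighted mean.
'''
import numpy as np
proba1 = np.array([[0.7, 0.2, 0.1]])  # hypothetical predict_proba output of model 1
proba2 = np.array([[0.4, 0.5, 0.1]])  # model 2
proba3 = np.array([[0.3, 0.4, 0.3]])  # model 3
# Weighted mean with weights [2, 1, 1], matching the VotingClassifier below
avg = (2 * proba1 + 1 * proba2 + 1 * proba3) / 4
print(avg.argmax(axis=1))  # -> [0]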
x=iris.data
y=iris.target
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3)
clf1 = XGBClassifier(learning_rate=0.1, n_estimators=150, max_depth=3, min_child_weight=2, subsample=0.8,
                     colsample_bytree=0.8, objective='multi:softprob')  # multiclass objective, as above
clf2 = RandomForestClassifier(n_estimators=50, max_depth=1, min_samples_split=4,
                              min_samples_leaf=63, oob_score=True)
clf3 = SVC(C=0.1, probability=True)  # probability=True is required so SVC exposes predict_proba for soft voting
# Soft voting
eclf = VotingClassifier(estimators=[('xgb', clf1), ('rf', clf2), ('svc', clf3)], voting='soft', weights=[2, 1, 1])
for clf, label in zip([clf1, clf2, clf3, eclf], ['XGBoost', 'Random Forest', 'SVM', 'Ensemble']):
    scores = cross_val_score(clf, x, y, cv=5, scoring='accuracy')
    print("Accuracy: %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))
# Stacking / Blending fusion for classification
'''
5-Fold Stacking
'''
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier,GradientBoostingClassifier
import pandas as pd
# Build the dataset: keep only the first 100 iris rows (classes 0 and 1),
# so the task is binary and AUC is well defined
data_0=iris.data
data=data_0[:100,:]
target_0=iris.target
target=target_0[:100]
# Individual base (level-1) models used in the fusion
clfs = [LogisticRegression(solver='lbfgs'),
        RandomForestClassifier(n_estimators=5, n_jobs=-1, criterion='gini'),
        ExtraTreesClassifier(n_estimators=5, n_jobs=-1, criterion='entropy'),
        GradientBoostingClassifier(learning_rate=0.05, subsample=0.5, max_depth=6, n_estimators=5)]
# Hold out part of the data as a test set
X, X_predict, y, y_predict = train_test_split(data, target, test_size=0.3, random_state=2020)
dataset_blend_train = np.zeros((X.shape[0], len(clfs)))
dataset_blend_test = np.zeros((X_predict.shape[0], len(clfs)))
## 5-fold stacking
n_splits = 5
skf = StratifiedKFold(n_splits)
# Materialize the folds up front: skf.split() returns a generator, which would be
# exhausted after the first base model and leave the remaining models with no folds
folds = list(skf.split(X, y))
for j, clf in enumerate(clfs):
    # Train each base model in turn
    dataset_blend_test_j = np.zeros((X_predict.shape[0], n_splits))
    for i, (train, test) in enumerate(folds):
        # 5-fold cross-training: hold out fold i for prediction, train on the rest,
        # and use the held-out predictions as the new feature for that fold
        X_train, y_train, X_test, y_test = X[train], y[train], X[test], y[test]
        clf.fit(X_train, y_train)
        y_submission = clf.predict_proba(X_test)[:, 1]
        dataset_blend_train[test, j] = y_submission
        dataset_blend_test_j[:, i] = clf.predict_proba(X_predict)[:, 1]
    # For the test set, the new feature is the mean of the k fold-models' predictions
    dataset_blend_test[:, j] = dataset_blend_test_j.mean(1)
    print("val auc Score: %f" % roc_auc_score(y_predict, dataset_blend_test[:, j]))
# Level-2 (meta) model trained on the stacked out-of-fold predictions
clf = LogisticRegression(solver='lbfgs')
clf.fit(dataset_blend_train, y)
y_submission = clf.predict_proba(dataset_blend_test)[:, 1]
print("Val auc Score of Stacking: %f" % (roc_auc_score(y_predict, y_submission)))
# Stacking fusion for classification (with mlxtend)
import warnings
warnings.filterwarnings('ignore')
import itertools
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from mlxtend.classifier import StackingClassifier
from sklearn.model_selection import cross_val_score
from mlxtend.plotting import plot_decision_regions
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target  # use two features only, so the decision regions can be drawn in 2D
clf1 = KNeighborsClassifier(n_neighbors=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
lr = LogisticRegression()
sclf = StackingClassifier(classifiers=[clf1, clf2, clf3],
                          meta_classifier=lr)
labels = ['KNN', 'Random Forest', 'Naive Bayes', 'Stacking Classifier']
clf_list=[clf1,clf2,clf3,sclf]
fig=plt.figure(figsize=(10,8))
gs=gridspec.GridSpec(2,2)
grid=itertools.product([0,1],repeat=2)
clf_cv_mean=[]
clf_cv_std=[]
for clf, label, grd in zip(clf_list, labels, grid):
    scores = cross_val_score(clf, X, y, cv=3, scoring='accuracy')
    print("Accuracy: %.2f (+/- %.2f) [%s]" % (scores.mean(), scores.std(), label))
    clf_cv_mean.append(scores.mean())
    clf_cv_std.append(scores.std())
    clf.fit(X, y)
    ax = plt.subplot(gs[grd[0], grd[1]])
    plot_decision_regions(X=X, y=y, clf=clf)
    plt.title(label)
plt.show()
About Blending
Blending is simpler than stacking, but its second layer sees less data, so it can overfit more easily.
The main idea is to split the original training set into two parts, e.g. 70% as the new training set and the remaining 30% as a holdout. In the first layer, multiple models are trained on the 70% and used to predict the labels of the 30% holdout as well as of the test set. In the second layer, the first-layer predictions on the 30% holdout serve directly as new features to train a second-level model; the first-layer predictions on the test set are then fed to that second-level model for the final prediction. A minimal sketch follows.
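Since blending is described above only in prose, here is a minimal runnable sketch mirroring the setup of the 5-fold stacking example (first 100 iris rows, the same four base models); the 70/30 second split, the variable names, and the random_state are illustrative assumptions rather than part of the original.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score

iris = load_iris()
data, target = iris.data[:100], iris.target[:100]  # binary subset, as in the stacking example

clfs = [LogisticRegression(solver='lbfgs'),
        RandomForestClassifier(n_estimators=5, n_jobs=-1, criterion='gini'),
        ExtraTreesClassifier(n_estimators=5, n_jobs=-1, criterion='entropy'),
        GradientBoostingClassifier(learning_rate=0.05, subsample=0.5, max_depth=6, n_estimators=5)]

# Hold out a test set, then split the remaining data 70/30 into layer-1 / layer-2 parts
X, X_predict, y, y_predict = train_test_split(data, target, test_size=0.3, random_state=2020)
X_d1, X_d2, y_d1, y_d2 = train_test_split(X, y, test_size=0.3, random_state=2020)

dataset_d1 = np.zeros((X_d2.shape[0], len(clfs)))       # layer-2 training features
dataset_d2 = np.zeros((X_predict.shape[0], len(clfs)))  # layer-2 test features

for j, clf in enumerate(clfs):
    # Layer 1: fit on the 70% part, predict the 30% holdout and the test set
    clf.fit(X_d1, y_d1)
    dataset_d1[:, j] = clf.predict_proba(X_d2)[:, 1]
    dataset_d2[:, j] = clf.predict_proba(X_predict)[:, 1]
    print("val auc Score: %f" % roc_auc_score(y_predict, dataset_d2[:, j]))

# Layer 2: train a meta-model on the holdout's layer-1 predictions
clf = LogisticRegression(solver='lbfgs')
clf.fit(dataset_d1, y_d2)
y_submission = clf.predict_proba(dataset_d2)[:, 1]
print("Val auc Score of Blending: %f" % roc_auc_score(y_predict, y_submission))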