[Tianchi Competition] "Happiness Prediction": A Complete Record

https://tianchi.aliyun.com/competition/entrance/231702/rankingList

1. Data and Model Exploration

1.1 Data preprocessing
# coding=utf-8
import pandas as pd
import numpy as np
from sklearn import preprocessing

df=pd.read_csv(r'/Users/ranmo/Desktop/天池/幸福感/happiness_train_complete.csv',encoding='GB2312',index_col='id')

df = df[df["happiness"]>0]   # non-positive happiness values are bad records; this drops 12 of them

df.dtypes[df.dtypes==object]  # four columns are not numeric and need converting
for i in range(df.dtypes[df.dtypes==object].shape[0]):
    print(df.dtypes[df.dtypes==object].index[i])
    
    
#convert the four columns; afterwards df is entirely numeric
df["survey_month"] = df["survey_time"].transform(lambda line:line.split(" ")[0].split("/")[1]).astype("int64")   # survey month: split date/time on the space; item 1 of the date is the month
df["survey_day"] = df["survey_time"].transform(lambda line:line.split(" ")[0].split("/")[2]).astype("int64")   # survey day
df["survey_hour"] = df["survey_time"].transform(lambda line:line.split(" ")[1].split(":")[0]).astype("int64")   # survey hour
df=df.drop(columns='survey_time')

enc1=preprocessing.OrdinalEncoder()
enc2=preprocessing.OrdinalEncoder()
enc3=preprocessing.OrdinalEncoder()
df['edu_other']=enc1.fit_transform(df['edu_other'].fillna(0).transform(lambda x:str(x)).values.reshape(-1,1))
print(enc1.categories_)  # inspect the learned categories

df['property_other']=enc2.fit_transform(df['property_other'].fillna(0).transform(lambda x:str(x)).values.reshape(-1,1))
print(enc2.categories_)  # inspect the learned categories

df['invest_other']=enc3.fit_transform(df['invest_other'].fillna(0).transform(lambda x:str(x)).values.reshape(-1,1))
print(enc3.categories_)  # inspect the learned categories


#define X and Y
X=df.drop(columns='happiness').fillna(0)
Y=df.happiness
1.2 First pass with baseline models
  • Linear regression
from sklearn import metrics
from sklearn import linear_model
from sklearn import model_selection
#=============
#1. Linear regression
#=============

#=============
#1.1 Ordinary least squares
#=============
reg11=linear_model.LinearRegression()
# manual cross-validation: predictions get rounded in several ways, so the built-in CV scorer is not used
# mes1: raw predictions; mes2: rounded; mes3: floored; mes4: ceiled
mes1=[]
mes2=[]
mes3=[]
mes4=[]
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    
    y_pred=reg11.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    e2=metrics.mean_squared_error(np.round(y_pred),y_test)
    e3=metrics.mean_squared_error(np.trunc(y_pred),y_test)
    e4=metrics.mean_squared_error(np.ceil(y_pred),y_test)
    mes1.append(e1)
    mes2.append(e2)
    mes3.append(e3)
    mes4.append(e4)
print('normal_linear:')
print(mes1)
print(np.mean(mes1))
print('-------------')
print(mes2)
print(np.mean(mes2))
print('-------------')
print(mes3)
print(np.mean(mes3))
print('-------------')
print(mes4)
print(np.mean(mes4))
print()
print()
#none of the rounding schemes beats the raw regression output, yet raw non-integer predictions don't match the integer target either, so classification is worth trying


#=============
#1.2 Lasso (L1) regression
#=============
reg12=linear_model.Lasso()
# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used
# mes1: raw predictions; mes2: rounded; mes3: floored; mes4: ceiled
mes1=[]
mes2=[]
mes3=[]
mes4=[]
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    
    y_pred=reg12.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    e2=metrics.mean_squared_error(np.round(y_pred),y_test)
    e3=metrics.mean_squared_error(np.trunc(y_pred),y_test)
    e4=metrics.mean_squared_error(np.ceil(y_pred),y_test)
    mes1.append(e1)
    mes2.append(e2)
    mes3.append(e3)
    mes4.append(e4)
print('Lasso:')
print(mes1)
print(np.mean(mes1))
print('-------------')
print(mes2)
print(np.mean(mes2))
print('-------------')
print(mes3)
print(np.mean(mes3))
print('-------------')
print(mes4)
print(np.mean(mes4))
print()
print()

#=============
#1.3 Ridge (L2) regression
#=============
reg13=linear_model.Ridge()
# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used
# mes1: raw predictions; mes2: rounded; mes3: floored; mes4: ceiled
mes1=[]
mes2=[]
mes3=[]
mes4=[]
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    
    y_pred=reg13.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    e2=metrics.mean_squared_error(np.round(y_pred),y_test)
    e3=metrics.mean_squared_error(np.trunc(y_pred),y_test)
    e4=metrics.mean_squared_error(np.ceil(y_pred),y_test)
    mes1.append(e1)
    mes2.append(e2)
    mes3.append(e3)
    mes4.append(e4)
print('Ridge:')
print(mes1)
print(np.mean(mes1))
print('-------------')
print(mes2)
print(np.mean(mes2))
print('-------------')
print(mes3)
print(np.mean(mes3))
print('-------------')
print(mes4)
print(np.mean(mes4))
print()
print()

#=============
#1.4 Logistic regression
#=============
clf14=linear_model.LogisticRegression(penalty='none',solver='saga') # regularization hurt accuracy here, so it is disabled
# manual cross-validation, consistent with the setup above
mes1=[]

kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    
    y_pred=clf14.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes1.append(e1)
print('LR:')
print(mes1)
print(np.mean(mes1))
print()
print()

#conclusion: OLS and logistic regression do best
  • SVM
from sklearn import metrics
from sklearn import svm
from sklearn import model_selection
#=============
#2. SVM
#=============
clf2=svm.SVC()   # gamma and C left at defaults; no tuning yet
# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used
mes=[]

kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    
    y_pred=clf2.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('SVM:')
print(mes)
print(np.mean(mes))
print()
print()

#conclusion: mediocre
  • KNN
from sklearn import metrics
from sklearn import neighbors
from sklearn import model_selection
#=============
#3. KNN
#=============

for n in range(10,101,10):    # K will certainly matter
    clf3=neighbors.KNeighborsClassifier(n_neighbors=n)  
    # manual cross-validation: predictions get rounded, so the built-in CV scorer is not used
    mes=[]

    kf=model_selection.KFold(10,shuffle=True)
    for train,test in kf.split(X):
        X_train = X.iloc[train]
        y_train = Y.iloc[train]
        X_test = X.iloc[test]
        y_test = Y.iloc[test]

        y_pred=clf3.fit(X_train,y_train).predict(X_test)
        e1=metrics.mean_squared_error(y_pred,y_test)
        mes.append(e1)
    print('KNN(n=%d):'%n)
    print(mes)
    print(np.mean(mes))
    print()
    print()
#conclusion: mediocre
  • naive_bayes
from sklearn import metrics
from sklearn import naive_bayes
from sklearn import model_selection

X_new=X   # considered standardizing, but it made results worse, so skipped
#=============
#4. Naive Bayes
#=============
clf4=naive_bayes.GaussianNB()   # multinomial NB wouldn't run on this data (it needs non-negative features), so Gaussian NB is used
# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used
mes=[]

kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X_new.iloc[train]
    y_train = Y.iloc[train]
    X_test = X_new.iloc[test]
    y_test = Y.iloc[test]
    
    y_pred=clf4.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('bayes:')
print(mes)
print(np.mean(mes))
print()
print()

#conclusion: very poor; Gaussian NB is clearly a bad fit, and multinomial NB might do better
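Following up on that aside with a hedged sketch: MultinomialNB only accepts non-negative features, so one would squeeze the data into [0, 1] first. The scaler-plus-pipeline here is an illustration, not part of the original run:

from sklearn import naive_bayes, preprocessing, pipeline

# scale every feature into [0, 1] so MultinomialNB's non-negativity requirement holds
clf4b = pipeline.make_pipeline(preprocessing.MinMaxScaler(),
                               naive_bayes.MultinomialNB())
y_pred = clf4b.fit(X_train, y_train).predict(X_test)
print(metrics.mean_squared_error(y_pred, y_test))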
  • Decision tree
from sklearn import metrics
from sklearn import tree
from sklearn import model_selection
#=============
#5. Decision tree
#=============

clf5=tree.DecisionTreeClassifier()   
# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used
mes=[]

kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    
    y_pred=clf5.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('Tree:')
print(mes)
print(np.mean(mes))
print()
print()
#conclusion: very poor
  • MLP
from sklearn import metrics
from sklearn import neural_network
from sklearn import model_selection
#=============
#6. MLP
#=============

clf6=neural_network.MLPClassifier(hidden_layer_sizes=(10,8,5,3,2),activation='logistic')   # arbitrary hidden-layer layout for now
# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used
mes=[]

kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    
    y_pred=clf6.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('MLP:')
print(mes)
print(np.mean(mes))
print()
print()
#conclusion: surprisingly decent; worth tuning the network later

  • Random forest
from sklearn import metrics
from sklearn import ensemble
from sklearn import model_selection
#=============
#7. Random forest
#=============

clf7=ensemble.RandomForestRegressor(n_estimators=20,n_jobs=-1)   
# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used
mes=[]

kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    
    y_pred=clf7.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('RandomForest:')
print(mes)
print(np.mean(mes))
print()
print()


#conclusion: mediocre; tune later


#=============
#rank features by importance
import matplotlib.pyplot as plt
%matplotlib inline

a=ensemble.RandomForestRegressor(n_estimators=20).fit(X,Y).feature_importances_
temp=np.argsort(a)  # returns the sorted indices

a=list(a)
a.sort()

b=[]
for i in temp:
    b.append(X.columns[i])

plt.figure(figsize=(10,40))
plt.grid()
plt.barh(b,a,)

#observations:
# 1. the three converted columns (edu_other, property_other, invest_other) matter little, and the property_* / invest_* columns in general look unimportant
# 2. among the top ten, equity and depression reflect social attitude and mindset;
#     class, family_income and floor_area reflect wealth;
#     birth, marital_1st, weight_jin and country reflect objective status;
#     survey_day is the most puzzling entry: why should it matter at all?
    
    
  • GBDT
from sklearn import metrics
from sklearn import ensemble
from sklearn import model_selection
#=============
#8. GBDT
#=============
clf8=ensemble.GradientBoostingRegressor(max_features=20)   # max_features must be set or training is far too slow
# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used
mes=[]

kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    
    y_pred=clf8.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('GBDT:')
print(mes)
print(np.mean(mes))
print()
print()


#conclusion: quite good



#=============
#rank features by importance
import matplotlib.pyplot as plt
%matplotlib inline

a=ensemble.GradientBoostingRegressor().fit(X,Y).feature_importances_   # regressor, consistent with the model evaluated above
temp=np.argsort(a)  # returns the sorted indices

a=list(a)
a.sort()

b=[]
for i in temp:
    b.append(X.columns[i])

plt.figure(figsize=(10,40))
plt.grid()
plt.barh(b,a,)


  • xgboost
from sklearn import metrics
import xgboost
from sklearn import model_selection
#=============
#9. XGBoost
#=============

clf9=xgboost.XGBRegressor()   
# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used
mes=[]

kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    
    y_pred=clf9.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('XGBoost:')
print(mes)
print(np.mean(mes))
print()
print()


#conclusion: remarkable; strong results straight out of the box


#=============
#rank features by importance
import matplotlib.pyplot as plt
%matplotlib inline

a=xgboost.XGBRegressor().fit(X,Y).feature_importances_
temp=np.argsort(a)  # returns the sorted indices

a=list(a)
a.sort()

b=[]
for i in temp:
    b.append(X.columns[i])

plt.figure(figsize=(10,40))
plt.grid()
plt.barh(b,a,)
  • lightgbm
from sklearn import metrics
import lightgbm
from sklearn import model_selection
#workaround for a LightGBM/OpenMP duplicate-library error
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"

#=============
#10. LightGBM
#=============

clf10=lightgbm.LGBMRegressor()   
# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used
mes=[]

kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    
    y_pred=clf10.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('LightGBM:')
print(mes)
print(np.mean(mes))
print()
print()


#conclusion: also very good; tune later


#=============
#rank features by importance
import matplotlib.pyplot as plt
%matplotlib inline

a=lightgbm.LGBMRegressor().fit(X,Y).feature_importances_
temp=np.argsort(a)  # returns the sorted indices

a=list(a)
a.sort()

b=[]
for i in temp:
    b.append(X.columns[i])

plt.figure(figsize=(10,40))
plt.grid()
plt.barh(b,a,)
1.3 Takeaways

Overall, GBDT, XGBoost and LightGBM all perform well, random forest is mediocre, and OLS and LR are also respectable. The plan from here: tune XGBoost, LightGBM, GBDT and random forest to strengthen the individual models, then possibly blend them with LR on top.

Submitting the untuned XGBoost model: the raw predictions score 0.48043, the rounded ones 0.55394.

df1=pd.read_csv(r'/Users/ranmo/Desktop/天池/幸福感/happiness_test_complete.csv',encoding='GB2312',index_col='id')

    
    
#convert the four columns; afterwards df1 is entirely numeric
df1["survey_month"] = df1["survey_time"].transform(lambda line:line.split(" ")[0].split("/")[1]).astype("int64")   # survey month: split date/time on the space; item 1 of the date is the month
df1["survey_day"] = df1["survey_time"].transform(lambda line:line.split(" ")[0].split("/")[2]).astype("int64")   # survey day
df1["survey_hour"] = df1["survey_time"].transform(lambda line:line.split(" ")[1].split(":")[0]).astype("int64")   # survey hour
df1=df1.drop(columns='survey_time')



def temp1(a):   # map categories unseen during training to 0 (the fill value)
    if a not in enc1.categories_[0]:
        return 0
    else:
        return a
df1['edu_other']=enc1.transform(df1['edu_other'].transform(temp1).transform(lambda x:str(x)).values.reshape(-1,1))

def temp2(a):
    if a not in enc2.categories_[0]:
        return 0
    else:
        return a
df1['property_other']=enc2.transform(df1['property_other'].transform(temp2).transform(lambda x:str(x)).values.reshape(-1,1))

def temp3(a):
    if a not in enc3.categories_[0]:
        return 0
    else:
        return a
df1['invest_other']=enc3.transform(df1['invest_other'].transform(temp3).transform(lambda x:str(x)).values.reshape(-1,1))



#define X_test
X_test=df1.fillna(0)

# result 1: raw predictions
y_test=xgboost.XGBRegressor().fit(X,Y).predict(X_test)
df1_final=pd.DataFrame({'id':X_test.index,'happiness':y_test}).set_index('id')
df1_final.to_csv(r'/Users/ranmo/Desktop/天池/幸福感/df1_final.csv')
# result 1, rounded
df1_final_round=pd.DataFrame({'id':X_test.index,'happiness':np.round(y_test)}).set_index('id')
df1_final_round.to_csv(r'/Users/ranmo/Desktop/天池/幸福感/df1_final_round.csv')

二薛匪、超參數(shù)搜索

2.1 XGBoost

Reference: https://blog.csdn.net/han_xiaoyang/article/details/52665396

Key XGBoost parameters:

  • max_depth: best kept between 3 and 10.
  • min_child_weight: the minimum sum of instance weights in a leaf; if a split would produce a leaf whose weight sum falls below it, the split is abandoned. Effective against overfitting to peculiar samples; default 1.
  • gamma: the minimum loss reduction required to make a further split. Start small, around 0 to 0.2.
  • subsample and colsample_bytree: the fraction of rows sampled when building each tree and the fraction of features sampled per tree; typical values fall in 0.5 to 0.9, and setting them too low underfits.
  • scale_pos_weight: addresses class imbalance and speeds convergence (by reweighting samples); not investigated here, so left at the default.
  • reg_alpha
  • reg_lambda
    and so on; a sketch of these knobs follows.
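For orientation, a minimal sketch of where each knob sits on the sklearn-style wrapper; the values are placeholders for illustration, not the tuned results found below:

import xgboost

sketch = xgboost.XGBRegressor(
    max_depth=5,                 # tree depth, usually 3-10
    min_child_weight=1,          # minimum summed instance weight per leaf
    gamma=0.1,                   # minimum loss reduction to keep splitting
    subsample=0.8,               # fraction of rows sampled per tree
    colsample_bytree=0.8,        # fraction of features sampled per tree
    reg_alpha=0, reg_lambda=1)   # L1 / L2 regularization strengths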
2.1.1 Initial parameters
#run the base model at its initial parameters
clf9=xgboost.XGBRegressor(loss_function='RMSE')   # note: loss_function is not an XGBoost argument; squared error is already the default objective
clf=model_selection.GridSearchCV(clf9,{'max_depth':np.array([3])},cv=10,n_jobs=-1,scoring='neg_mean_squared_error') #score with MSE
clf.fit(X_train,y_train)

print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])

P.S. When GridSearchCV's scoring is unset it falls back to the estimator's own score method, which for XGBRegressor is R2 rather than RMSE, so scoring has to be set explicitly here.

2.1.2 Tuning max_depth and min_child_weight
#coarse search over max_depth and min_child_weight
param_test = {
 'max_depth':range(1,10,2),
 'min_child_weight':range(1,6,2)
}

clf=model_selection.GridSearchCV(clf9,param_test,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)

print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])

print()
print(clf.best_params_)

Refine around the coarse optimum with a denser grid:

#fine search over max_depth and min_child_weight
param_test = {
 'max_depth':[4,5,6],
 'min_child_weight':[4,5,6]
}

clf=model_selection.GridSearchCV(clf9,param_test ,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)

print(clf.best_score_)
print(clf.best_params_)

A good improvement on the initial 0.47178.

2.1.3 Tuning gamma
#coarse search over gamma
param_test = {
'max_depth':np.array([4]),
'min_child_weight':np.array([5]),
'gamma':np.arange(0,0.5,0.1)
}

clf=model_selection.GridSearchCV(clf9,param_test ,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)

print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])

print(clf.best_score_)
print(clf.best_params_)

The best value is the initial one, so gamma stays unchanged. (Though why did the score get worse again?)

2.1.4 Tuning subsample and colsample_bytree
#coarse search over subsample and colsample_bytree
param_test = {
'max_depth':np.array([4]),
'min_child_weight':np.array([5]),
'gamma':np.array([0]),
'subsample':np.arange(0.6,1,0.1),
'colsample_bytree':np.arange(0.6,1,0.1)  
}

clf=model_selection.GridSearchCV(clf9,param_test ,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)

print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])

print(clf.best_score_)
print(clf.best_params_)

The coarse optimum is 0.9 and 0.8; refine around it:

#fine search over subsample and colsample_bytree
param_test = {
'max_depth':np.array([4]),
'min_child_weight':np.array([5]),
'gamma':np.array([0]),
'subsample':np.arange(0.75,0.86,0.05),
'colsample_bytree':np.arange(0.75,0.86,0.05)  
}

clf=model_selection.GridSearchCV(clf9,param_test ,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)

print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])

print(clf.best_score_)
print(clf.best_params_)

The fine search settles on 0.75 and 0.8. (And the score actually went up??)

2.1.5 Tuning the regularization parameters
#coarse search over reg_alpha and reg_lambda
param_test = {
'max_depth':np.array([4]),
'min_child_weight':np.array([5]),
'gamma':np.array([0]),
'subsample':np.array([0.8]),
'colsample_bytree':np.array([0.75]),  
'reg_alpha':[1e-5, 1e-2, 0.1, 1, 100],
'reg_lambda':[1e-5, 1e-2, 0.1, 1, 100]  
}

clf=model_selection.GridSearchCV(clf9,param_test ,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)

print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])

print(clf.best_score_)
print(clf.best_params_)

Search around 1 and 0.1 to see whether anything nearby is better.

#fine search over reg_alpha and reg_lambda
param_test = {
'max_depth':np.array([4]),
'min_child_weight':np.array([5]),
'gamma':np.array([0]),
'subsample':np.array([0.8]),
'colsample_bytree':np.array([0.75]),  
'reg_alpha':[0,0.5,1,2,5],
'reg_lambda':[0,0.05,0.1,0.2,0.5]  
}

clf=model_selection.GridSearchCV(clf9,param_test ,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)

print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])

print(clf.best_score_)
print(clf.best_params_)

Best parameters: 5 and 0.1.

2.1.6 Final run with a low learning rate and more trees
#final configuration
param_test = {
'max_depth':np.array([4]),
'min_child_weight':np.array([5]),
'gamma':np.array([0]),
'subsample':np.array([0.8]),
'colsample_bytree':np.array([0.75]),  
'reg_alpha':np.array([5]),
'reg_lambda':np.array([0.1]) ,
'learning_rate':np.array([0.01]),
'n_estimators':np.array([5000]), 
    
}


clf9=xgboost.XGBRegressor(loss_function='RMSE')
clf=model_selection.GridSearchCV(clf9,param_test,cv=10,n_jobs=-1,scoring='neg_mean_squared_error') #search the full param_test grid, scored with MSE
clf.fit(X_train,y_train)

print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])

print(clf.best_score_)
print(clf.best_params_)


The numbers do improve, from the initial 0.47178 down to 0.46099. But with the low learning rate and the extra trees the model is noticeably slower, and the submitted score actually got worse (possibly overfitting, or possibly small swings in either direction are just noise), so the model actually run below does not use the low-learning-rate/many-trees setup.
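A hedged aside on that trade-off: rather than fixing n_estimators=5000 blindly, the classic xgboost fit-time API can stop adding trees once a held-out RMSE stalls (assuming an xgboost version where early_stopping_rounds is still a fit argument):

from sklearn import model_selection

X_tr, X_val, y_tr, y_val = model_selection.train_test_split(X, Y, test_size=0.2)
slow = xgboost.XGBRegressor(learning_rate=0.01, n_estimators=5000,
                            max_depth=4, min_child_weight=5,
                            subsample=0.8, colsample_bytree=0.75,
                            reg_alpha=5, reg_lambda=0.1)
# stop once validation RMSE has not improved for 50 consecutive rounds
slow.fit(X_tr, y_tr, eval_set=[(X_val, y_val)], eval_metric='rmse',
         early_stopping_rounds=50, verbose=False)
print(slow.best_iteration)   # the number of trees actually worth growing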

# result 2

from sklearn import metrics
import xgboost
from sklearn import model_selection
from sklearn.externals import joblib
#=============
#xgboost_modified
#=============

clf_xgboost_modified=xgboost.XGBRegressor(max_depth=4,min_child_weight=5,gamma=0,subsample=0.8,colsample_bytree=0.75,reg_alpha=5,reg_lambda=0.1)   
# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used
mes=[]
i=0

kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test1 = X.iloc[test]
    y_test1 = Y.iloc[test]
    
    clf_xgboost_modified.fit(X_train,y_train)
    y_pred=clf_xgboost_modified.predict(X_test1)
    e1=metrics.mean_squared_error(y_pred,y_test1)
    mes.append(e1)
    joblib.dump(clf_xgboost_modified,filename='/Users/ranmo/Desktop/天池/幸福感/xgboost/xgboost_%d.pkl'%i)
    
    y_test=clf_xgboost_modified.predict(X_test)

    df2_final=pd.DataFrame({'id':X_test.index,'happiness':y_test}).set_index('id')
    df2_final.to_csv('/Users/ranmo/Desktop/天池/幸福感/xgboost/df2_xgboost_%d.csv'%i)
 
    i+=1
print('clf_xgboost_modified:')
print(mes)
print(np.mean(mes))
print()
print()

Best leaderboard score: 0.47675.

2.2 LightGBM
2.2.1 Initial parameters

Default parameters:


#run the base model at its initial parameters
clf10=lightgbm.LGBMRegressor(metric='l2')   # metric defaults to l2 for regression
clf=model_selection.GridSearchCV(clf10,{'max_depth':np.array([-1])},cv=10,n_jobs=-1,scoring='neg_mean_squared_error') #用均方差計(jì)算score
clf.fit(X_train,y_train)

print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
2.2.2 Search results

The hyperparameter search follows the same playbook as XGBoost, but LightGBM's own optimization machinery brings a few extra parameters:

  • max_depth: direct counterpart.
  • num_leaves: XGBoost exposes an analogous cap on the number of leaves, but since its trees expand level-wise as binary trees the two are tied (at most 2^max_depth leaves), so only one of the pair ever needs setting. LightGBM grows trees leaf-wise and optimizes below that ceiling, so num_leaves must be set together with max_depth and kept well under 2^max_depth (see the sketch below).
  • min_child_samples: the minimum number of samples per leaf; XGBoost has no such parameter.
  • min_child_weight: same meaning as in XGBoost, the minimum summed instance weight per leaf.
  • min_split_gain: same meaning, the minimum loss reduction required to split.
  • subsample: direct counterpart.
  • colsample_bytree: direct counterpart.
  • subsample_freq: LightGBM-specific, paired with subsample; it is the bagging frequency: 0 means never subsample (use the full sample every iteration), while k means subsample every k iterations.
  • reg_alpha, reg_lambda, learning_rate and n_estimators carry over unchanged.

In principle LightGBM has more parameters to search, but each run is faster, so the overall search effort stays reasonable.

Reference: https://www.imooc.com/article/43784?block_id=tuijian_wz
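A minimal sketch of the num_leaves/max_depth coupling described above; the numbers are illustrative, the constraint is the point:

import lightgbm

depth = 9
clf_sketch = lightgbm.LGBMRegressor(
    max_depth=depth,
    num_leaves=min(2 ** depth, 100),   # keep well below the 2**depth ceiling
    subsample=0.8, subsample_freq=1,   # subsample only takes effect when freq > 0
    colsample_bytree=0.8)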

#final configuration
clf10=lightgbm.LGBMRegressor(metric='l2')   # metric defaults to l2 for regression

param_test = {
'max_depth':np.array([9]),
'min_child_weight':np.array([0.0001]),
'min_split_gain':np.array([0.4]),
'subsample':np.array([0.5]),
'colsample_bytree':np.array([1]),  
'reg_alpha':np.array([1e-05]),
'reg_lambda':np.array([0.0001]) ,
'learning_rate':np.array([0.1]),
}

clf=model_selection.GridSearchCV(clf10,param_test,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)

print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
print(clf.best_score_)
print(clf.best_params_)

# best found: {'colsample_bytree': 1, 'learning_rate': 0.1, 'max_depth': 9, 'min_child_weight': 0.0001, 'min_split_gain': 0.4, 'reg_alpha': 1e-05, 'reg_lambda': 0.0001, 'subsample': 0.5}

RMSE drops from 0.47728 to 0.47000.
Submitting the tuned LightGBM model gives a best score of 0.48128.

2.3 GBDT

https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html#sklearn.ensemble.GradientBoostingRegressor

https://blog.csdn.net/manjhok/article/details/82017696

2.3.1 Initial parameters

Default parameters:


#=============
#GBDT_modified
#=============

#run the base model at its initial parameters
clf8=ensemble.GradientBoostingRegressor(loss='ls')   
clf=model_selection.GridSearchCV(clf8,{'max_depth':np.array([3])},cv=10,n_jobs=-1,scoring='neg_mean_squared_error') #score with MSE
clf.fit(X_train,y_train)

print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
2.3.2 Search results

The search procedure is essentially the same; only the parameter names differ.

#final configuration
clf8=ensemble.GradientBoostingRegressor(loss='ls')   

param_test = {   
 'max_depth':np.array([2]),
 'min_weight_fraction_leaf':np.array([0.002]), 
 'min_impurity_split':np.array([0.0001]),
 'subsample':np.array([0.96]),
 'max_features':np.array([0.88]),
 'n_estimators':np.array([80]),   
 'learning_rate':np.array([0.2]),    

}

clf=model_selection.GridSearchCV(clf8,param_test,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)

print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
print(clf.best_score_)
print(clf.best_params_)

# best found: {'learning_rate': 0.2, 'max_depth': 2, 'max_features': 0.88, 'min_impurity_split': 0.0001, 'min_weight_fraction_leaf': 0.002, 'n_estimators': 80, 'subsample': 0.96}

RMSE drops from 0.47534 to 0.47148.
Submitting the tuned GBDT model gives a best score of 0.48317.

2.4 Random forest

https://blog.csdn.net/u012559520/article/details/77336098

2.4.1 Initial parameters
#=============
#RandomForest_modified
#=============

#run the base model at its initial parameters
clf7=ensemble.RandomForestRegressor(criterion='mse',n_jobs=-1)   
clf=model_selection.GridSearchCV(clf7,{'min_samples_split':np.array([2])},cv=10,n_jobs=-1,scoring='neg_mean_squared_error') #score with MSE
clf.fit(X_train,y_train)

print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
2.4.2 Search results
#final configuration

param_test = {   
 'min_samples_split':np.array([4]),          
 'min_weight_fraction_leaf':np.array([0.01]),   
 'min_impurity_decrease':np.array([0]),   
 'n_estimators':[150],  
 'max_features':[0.8],          # probably shouldn't be too high for a random forest
}

clf=model_selection.GridSearchCV(clf7,param_test ,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)

print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
print(clf.best_score_)
print(clf.best_params_)

# best found: {'max_features': 0.8, 'min_impurity_decrease': 0, 'min_samples_split': 4, 'min_weight_fraction_leaf': 0.01, 'n_estimators': 150}

RMSE drops from 0.53373 to 0.48867.
Submitting the tuned RF model gives a best score of 0.51088.

三逮京、模型融合

3.1 Averaging XGBoost + LightGBM + GBDT
#average the existing xgboost + lightgbm + gdbt models


# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used
xgboost_mes=[]
lightgbm_mes=[]
gdbt_mes=[]
mix_mes=[]

i=0

kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X_train):   # fold indices must come from X_train, which is what gets indexed below
    X_train1 = X_train.iloc[train]
    y_train1 = y_train.iloc[train]
    X_test1 = X_train.iloc[test]
    y_test1 = y_train.iloc[test]
    
    xgboost=joblib.load(r'C:\Users\sunsharp\Desktop\學(xué)習(xí)\幸福感\(zhòng)xgboost\xgboost_%d.pkl'%i)
    lightgbm=joblib.load(r'C:\Users\sunsharp\Desktop\學(xué)習(xí)\幸福感\(zhòng)lightgbm\lightgbm_%d.pkl'%i)
    gdbt=joblib.load(r'C:\Users\sunsharp\Desktop\學(xué)習(xí)\幸福感\(zhòng)gdbt\gdbt_%d.pkl'%i)
    
    xgboost_y_pred=xgboost.fit(X_train1,y_train1).predict(X_test1)
    lightgbm_y_pred=lightgbm.fit(X_train1,y_train1).predict(X_test1)
    gdbt_y_pred=gdbt.fit(X_train1,y_train1).predict(X_test1)
    mix_y_pred=(xgboost_y_pred+lightgbm_y_pred+gdbt_y_pred)/3
    
    
    
    xgboost_mes.append(metrics.mean_squared_error(xgboost_y_pred,y_test1))
    lightgbm_mes.append(metrics.mean_squared_error(lightgbm_y_pred,y_test1))
    gdbt_mes.append(metrics.mean_squared_error(gdbt_y_pred,y_test1))
    mix_mes.append(metrics.mean_squared_error(mix_y_pred,y_test1))
    
    xgboost_y_test=xgboost.predict(X_test)
    lightgbm_y_test=lightgbm.predict(X_test)
    gdbt_y_test=gdbt.predict(X_test)
    mix_y_test=(xgboost_y_test+lightgbm_y_test+gdbt_y_test)/3

    df_mix_final=pd.DataFrame({'id':X_test.index,'happiness':mix_y_test}).set_index('id')
    df_mix_final.to_csv(r'C:\Users\sunsharp\Desktop\學(xué)習(xí)\幸福感\(zhòng)mixmodel\df_mix_%d.csv'%i)
 
    i+=1
print('xgboost:')
print(xgboost_mes)
print(np.mean(xgboost_mes))
print()
print('lightgbm:')
print(lightgbm_mes)
print(np.mean(lightgbm_mes))
print()
print('gdbt:')
print(gdbt_mes)
print(np.mean(gdbt_mes))
print()
print('mix:')
print(mix_mes)
print(np.mean(mix_mes))
print()

On the training folds the blend is a slight improvement.
Submitting the averaged blend gives a best score of 0.47104.

3.2 Blending XGBoost + LightGBM + GBDT with linear regression
#LR-blend the existing xgboost + lightgbm + gdbt models


# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used

import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import model_selection
from sklearn.externals import joblib
from sklearn import metrics
import lightgbm
#workaround for a LightGBM/OpenMP duplicate-library error
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import xgboost
from sklearn import ensemble
from sklearn import linear_model

xgboost_mes=[]
lightgbm_mes=[]
gdbt_mes=[]
lrmix_mes=[]

i=0

kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X_train):   # fold indices must come from X_train, which is what gets indexed below
    X_train1 = X_train.iloc[train]
    y_train1 = y_train.iloc[train]
    X_test1 = X_train.iloc[test]
    y_test1 = y_train.iloc[test]
    
    print(i)
    
    xgboost=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/xgboost/xgboost_%d.pkl'%i)
    lightgbm=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/lightgbm/lightgbm_%d.pkl'%i)
    gdbt=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/gdbt/gdbt_%d.pkl'%i)
    
    

    
    xgboost_y_pred=xgboost.fit(X_train1,y_train1).predict(X_test1)
    lightgbm_y_pred=lightgbm.fit(X_train1,y_train1).predict(X_test1)
    gdbt_y_pred=gdbt.fit(X_train1,y_train1).predict(X_test1)
    #train the blending model on the base models' training-set predictions
    a=xgboost.fit(X_train1,y_train1).predict(X_train1)
    b=lightgbm.fit(X_train1,y_train1).predict(X_train1)
    c=gdbt.fit(X_train1,y_train1).predict(X_train1)
    lr_mix=linear_model.LinearRegression().fit(np.array([a,b,c]).T,y_train1)
    lrmix_y_pred=lr_mix.predict(np.array([xgboost_y_pred,lightgbm_y_pred,gdbt_y_pred]).T)
    
    
    xgboost_mes.append(metrics.mean_squared_error(xgboost_y_pred,y_test1))
    lightgbm_mes.append(metrics.mean_squared_error(lightgbm_y_pred,y_test1))
    gdbt_mes.append(metrics.mean_squared_error(gdbt_y_pred,y_test1))
    lrmix_mes.append(metrics.mean_squared_error(lrmix_y_pred,y_test1))
    
    xgboost_y_test=xgboost.predict(X_test)
    lightgbm_y_test=lightgbm.predict(X_test)
    gdbt_y_test=gdbt.predict(X_test)
    lrmix_y_test=lr_mix.predict(np.array([xgboost_y_test,lightgbm_y_test,gdbt_y_test]).T)

    df_lrmix_final=pd.DataFrame({'id':X_test.index,'happiness':lrmix_y_test}).set_index('id')
    df_lrmix_final.to_csv(r'/Users/ranmo/Desktop/天池/幸福感/lrmixmodel/df_lrmix_%d.csv'%i)
    
    i+=1
print('xgboost:')
print(xgboost_mes)
print(np.mean(xgboost_mes))
print()
print('lightgbm:')
print(lightgbm_mes)
print(np.mean(lightgbm_mes))
print()
print('gdbt:')
print(gdbt_mes)
print(np.mean(gdbt_mes))
print()
print('lrmix:')
print(lrmix_mes)
print(np.mean(lrmix_mes))
print()
    

Not great. The cause: the meta-regression is fit on the base models' predictions of their own training rows (where the blend scores as low as 0.15), which lifts training accuracy but is overfitting, so the gain fails to carry over to the test folds.
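A common remedy, sketched here as the standard out-of-fold stacking recipe rather than what was actually run: the meta-learner only ever sees predictions from models that did not train on those rows, which cross_val_predict provides in one call (variable names follow the loop above):

from sklearn import linear_model, model_selection
import numpy as np

# out-of-fold predictions: each row is predicted by a model that never saw it
oof = np.column_stack([model_selection.cross_val_predict(m, X_train1, y_train1, cv=5)
                       for m in (xgboost, lightgbm, gdbt)])    # the three loaded base models
lr_mix = linear_model.LinearRegression().fit(oof, y_train1)

# at prediction time the base models are refit on all of X_train1 first
base = np.column_stack([m.fit(X_train1, y_train1).predict(X_test1)
                        for m in (xgboost, lightgbm, gdbt)])
lrmix_y_pred = lr_mix.predict(base)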

3.3 Weighted blending of XGBoost + LightGBM + GBDT

Since plain averaging works while the regression blend overfits, and the fitted regression coefficients summed to roughly 1, the next attempt blends the three models with explicit weights constrained to sum to 1.

a=np.arange(0,1.05,0.05)
b=np.arange(0,1.05,0.05)
c=np.arange(0,1.05,0.05)

coef_list=[]
for i in a:
    for j in b:
        for k in c:
            if np.isclose(i+j+k,1):   # float-safe test; an exact i+j+k==1 misses most grid points
                coef_list.append([i,j,k])
                
                
                
#weight-blend the existing xgboost + lightgbm + gdbt models


# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used

import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import model_selection
from sklearn.externals import joblib
from sklearn import metrics
import lightgbm
#workaround for a LightGBM/OpenMP duplicate-library error
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import xgboost
from sklearn import ensemble

xgboost_mes=[]
lightgbm_mes=[]
gdbt_mes=[]
weightmix_mes=[]

i=0

kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X_train):   # fold indices must come from X_train, which is what gets indexed below
    X_train1 = X_train.iloc[train]
    y_train1 = y_train.iloc[train]
    X_test1 = X_train.iloc[test]
    y_test1 = y_train.iloc[test]
    
    print(i)
    
    xgboost=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/xgboost/xgboost_%d.pkl'%i)
    lightgbm=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/lightgbm/lightgbm_%d.pkl'%i)
    gdbt=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/gdbt/gdbt_%d.pkl'%i)
    
    

    
    xgboost_y_pred=xgboost.fit(X_train1,y_train1).predict(X_test1)
    lightgbm_y_pred=lightgbm.fit(X_train1,y_train1).predict(X_test1)
    gdbt_y_pred=gdbt.fit(X_train1,y_train1).predict(X_test1)
    #pick the best weight vector on this fold (note: choosing weights on the validation fold itself is optimistic)
    error_list=[]
    for coef_i in coef_list:
        error_list.append(metrics.mean_squared_error(np.dot(np.array([xgboost_y_pred,lightgbm_y_pred,gdbt_y_pred]).T,coef_i),y_test1))
    coef=coef_list[np.argmin(error_list)]
    
    
    xgboost_mes.append(metrics.mean_squared_error(xgboost_y_pred,y_test1))
    lightgbm_mes.append(metrics.mean_squared_error(lightgbm_y_pred,y_test1))
    gdbt_mes.append(metrics.mean_squared_error(gdbt_y_pred,y_test1))
    weightmix_mes.append(min(error_list))
    
    xgboost_y_test=xgboost.predict(X_test)
    lightgbm_y_test=lightgbm.predict(X_test)
    gdbt_y_test=gdbt.predict(X_test)
    weightmix_y_test=np.dot(np.array([xgboost_y_test,lightgbm_y_test,gdbt_y_test]).T,coef)

    df_weightmix_final=pd.DataFrame({'id':X_test.index,'happiness':weightmix_y_test}).set_index('id')
    df_weightmix_final.to_csv(r'/Users/ranmo/Desktop/天池/幸福感/weightmixmodel/df_weightmix_%d.csv'%i)
    
    i+=1
print('xgboost:')
print(xgboost_mes)
print(np.mean(xgboost_mes))
print()
print('lightgbm:')
print(lightgbm_mes)
print(np.mean(lightgbm_mes))
print()
print('gdbt:')
print(gdbt_mes)
print(np.mean(gdbt_mes))
print()
print('weightmix:')
print(weightmix_mes)
print(np.mean(weightmix_mes))
print()
    

A slight improvement on the folds. Actual best leaderboard score: 0.47531.

3.4 Blending XGBoost + LightGBM + GBDT with a neural network
#MLP-blend the existing xgboost + lightgbm + gdbt models


# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used

import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import model_selection
from sklearn.externals import joblib
from sklearn import metrics
import lightgbm
#workaround for a LightGBM/OpenMP duplicate-library error
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import xgboost
from sklearn import ensemble
from sklearn import neural_network

xgboost_mes=[]
lightgbm_mes=[]
gdbt_mes=[]
MLPmix_mes=[]

i=0

kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X_train):   # fold indices must come from X_train, which is what gets indexed below
    X_train1 = X_train.iloc[train]
    y_train1 = y_train.iloc[train]
    X_test1 = X_train.iloc[test]
    y_test1 = y_train.iloc[test]
    
    print(i)
    
    xgboost=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/xgboost/xgboost_%d.pkl'%i)
    lightgbm=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/lightgbm/lightgbm_%d.pkl'%i)
    gdbt=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/gdbt/gdbt_%d.pkl'%i)
    
    

    
    xgboost_y_pred=xgboost.fit(X_train1,y_train1).predict(X_test1)
    lightgbm_y_pred=lightgbm.fit(X_train1,y_train1).predict(X_test1)
    gdbt_y_pred=gdbt.fit(X_train1,y_train1).predict(X_test1)
    #train the blending model on the base models' training-set predictions
    a=xgboost.fit(X_train1,y_train1).predict(X_train1)
    b=lightgbm.fit(X_train1,y_train1).predict(X_train1)
    c=gdbt.fit(X_train1,y_train1).predict(X_train1)
    MLP_mix=neural_network.MLPClassifier(hidden_layer_sizes=(5,3,2),activation='logistic').fit(np.array([a,b,c]).T,y_train1)
    MLPmix_y_pred=MLP_mix.predict(np.array([xgboost_y_pred,lightgbm_y_pred,gdbt_y_pred]).T)
    
    
    xgboost_mes.append(metrics.mean_squared_error(xgboost_y_pred,y_test1))
    lightgbm_mes.append(metrics.mean_squared_error(lightgbm_y_pred,y_test1))
    gdbt_mes.append(metrics.mean_squared_error(gdbt_y_pred,y_test1))
    MLPmix_mes.append(metrics.mean_squared_error(MLPmix_y_pred,y_test1))
    
    xgboost_y_test=xgboost.predict(X_test)
    lightgbm_y_test=lightgbm.predict(X_test)
    gdbt_y_test=gdbt.predict(X_test)
    MLPmix_y_test=MLP_mix.predict(np.array([xgboost_y_test,lightgbm_y_test,gdbt_y_test]).T)

    df_MLPmix_final=pd.DataFrame({'id':X_test.index,'happiness':MLPmix_y_test}).set_index('id')
    df_MLPmix_final.to_csv(r'/Users/ranmo/Desktop/天池/幸福感/MLPmixmodel/df_MLPmix_%d.csv'%i)
    
    i+=1
print('xgboost:')
print(xgboost_mes)
print(np.mean(xgboost_mes))
print()
print('lightgbm:')
print(lightgbm_mes)
print(np.mean(lightgbm_mes))
print()
print('gdbt:')
print(gdbt_mes)
print(np.mean(gdbt_mes))
print()
print('MLPmix:')
print(MLPmix_mes)
print(np.mean(MLPmix_mes))
print()
    

Also disappointing.

四凯正、簡(jiǎn)單特征工程

This work would normally precede modeling, but modern ensemble methods already do a good job of finding important features and down-weighting the rest, which greatly reduces the need for manual feature work. On the other hand, feature engineering that genuinely moves model accuracy is labor-intensive, so the models came first, and the feature work below is guided by the feature importances they produced.

4.1 Dropping unimportant features

Judging from the ensembles' feature importances, the unimportant features are mainly:

  • edu_other: drop edu_other
  • invest and invest_other: drop all invest_* columns and invest_other
  • property and property_other: drop property_other
  • s_work_type: drop s_work_type
4.1.1 Low variance
np.var(X_train)[np.var(X_train)<=np.percentile(np.var(X_train),20)]

The features with variance below 0.1 are:

  • edu_other
  • property_0, property_3~property_7
  • invest_0~invest_8
    These are removed later; a sklearn version of the same screen is sketched below.
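The same screen expressed with sklearn's VarianceThreshold, as a minimal sketch (the 0.1 cutoff is the one quoted above):

from sklearn import feature_selection

vt = feature_selection.VarianceThreshold(threshold=0.1).fit(X_train)
low_var_cols = X_train.columns[~vt.get_support()]    # should recover the list above
X_train_screened = X_train.loc[:, vt.get_support()]  # keep only higher-variance features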
4.1.2 Chi-square test

The chi-square test initially refused to run (chi2 requires non-negative input), and a closer look shows that many columns contain negative values:



So the raw data does contain bad values that should have been cleaned before modeling.
Here negatives are replaced by each feature's mode, and the chi-square test is then run.

from sklearn import feature_selection

X_train_new=X_train.copy()   # copy, so the fixes below don't mutate X_train itself

#replace negatives with each column's mode
dict_temp={}
for i in X_train_new.columns:
    dict_temp[i]=X_train_new[i].value_counts().index[0]

for i in dict_temp.keys():
    X_train_new.loc[X_train_new[i]<0,i]=dict_temp[i]

#negatives can survive that (when a column's mode is itself negative), so fall back to absolute values
X_train_new=np.abs(X_train_new)
p_value=feature_selection.chi2(X_train_new,y_train)[1]
p_value[np.isnan(p_value)]=0  # chi2 returns some NaNs; set them to 0



#rank features by chi-square p-value
import matplotlib.pyplot as plt
%matplotlib inline

temp=np.argsort(p_value)  # indices ascending by p-value, aligned with the sorted values plotted below

p_value=list(p_value)
p_value=np.sort(p_value)

b=[]
for i in temp:
    b.append(X_train_new.columns[i])

plt.figure(figsize=(10,40))
plt.grid()
plt.barh(b,p_value,)

From the plot, the variables most associated with the target are mainly:

  • the income fields;
  • marital status;
  • the birth years of the respondent and of the parents;
  • public_service satisfaction; and so on.

The features flagged earlier (edu_other, property_0, property_3~property_7, invest_0~invest_8) are essentially unrelated variables; the one exception is invest_6 with its relatively high p-value, but it is removed all the same. The removal itself is sketched below.
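Section 4.2 below works with X_train_new and X_test_new after this removal, but the drop itself never appears in the original notebook; a minimal sketch, with the column names taken from the lists above:

drop_cols = (['edu_other', 'property_0']
             + ['property_%d' % i for i in range(3, 8)]
             + ['invest_%d' % i for i in range(9)])
X_train_new = X_train.drop(columns=drop_cols)
X_test_new = X_test.drop(columns=drop_cols)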

4.2 Refitting the models
##one final averaged blend

#average the existing xgboost + lightgbm + gdbt models


# manual cross-validation: predictions get rounded, so the built-in CV scorer is not used

import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import model_selection
from sklearn.externals import joblib
from sklearn import metrics
import lightgbm
#workaround for a LightGBM/OpenMP duplicate-library error
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import xgboost
from sklearn import ensemble

xgboost_mes=[]
lightgbm_mes=[]
gdbt_mes=[]
mix_mes=[]

i=0

kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X_train_new):
    X_train1 = X_train_new.iloc[train]
    y_train1 = y_train.iloc[train]
    X_test1 = X_train_new.iloc[test]
    y_test1 = y_train.iloc[test]
    
    print(i)
    
    xgboost=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/feature/xgboost/xgboost_%d.pkl'%i)
    lightgbm=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/feature/lightgbm/lightgbm_%d.pkl'%i)
    gdbt=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/feature/gdbt/gdbt_%d.pkl'%i)
    
    xgboost_y_pred=xgboost.fit(X_train1,y_train1).predict(X_test1)
    lightgbm_y_pred=lightgbm.fit(X_train1,y_train1).predict(X_test1)
    gdbt_y_pred=gdbt.fit(X_train1,y_train1).predict(X_test1)
    mix_y_pred=(xgboost_y_pred+lightgbm_y_pred+gdbt_y_pred)/3
    
    
    
    xgboost_mes.append(metrics.mean_squared_error(xgboost_y_pred,y_test1))
    lightgbm_mes.append(metrics.mean_squared_error(lightgbm_y_pred,y_test1))
    gdbt_mes.append(metrics.mean_squared_error(gdbt_y_pred,y_test1))
    mix_mes.append(metrics.mean_squared_error(mix_y_pred,y_test1))
    
    xgboost_y_test=xgboost.predict(X_test_new)
    lightgbm_y_test=lightgbm.predict(X_test_new)
    gdbt_y_test=gdbt.predict(X_test_new)
    mix_y_test=(xgboost_y_test+lightgbm_y_test+gdbt_y_test)/3

    df_mix_final=pd.DataFrame({'id':X_test.index,'happiness':mix_y_test}).set_index('id')
    df_mix_final.to_csv(r'/Users/ranmo/Desktop/天池/幸福感/feature/mixmodel/df_mix_%d.csv'%i)
    
    i+=1
print('xgboost:')
print(xgboost_mes)
print(np.mean(xgboost_mes))
print()
print('lightgbm:')
print(lightgbm_mes)
print(np.mean(lightgbm_mes))
print()
print('gdbt:')
print(gdbt_mes)
print(np.mean(gdbt_mes))
print()
print('mix:')
print(mix_mes)
print(np.mean(mix_mes))
print()


The lightly feature-engineered models reach roughly the same optimum as before, which confirms that the ensemble methods were already handling the features well on their own.
As a last step, random seeds were varied (of little real meaning once the model is stable, just chasing a slightly higher score), giving a final score of 0.47098.

Done.
