任務(wù)3 - 建模(2天)
用邏輯回歸术幔、svm和決策樹;隨機(jī)森林和XGBoost進(jìn)行模型構(gòu)建湃密,評分方式任意诅挑,如準(zhǔn)確率等四敞。(不需要考慮模型調(diào)參)
時(shí)間: 2天
我的結(jié)果
import warnings

import pandas as pd
from sklearn.preprocessing import scale
from sklearn.model_selection import cross_val_score, cross_validate
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from xgboost.sklearn import XGBClassifier
# --- Data preparation (carried over from the earlier tasks) ---
data = pd.read_csv('data.csv', encoding='gbk')
# Drop exact duplicate rows.
data_clean = data.drop_duplicates()
# Identifier-like columns with no predictive value.
drop_columns = ['Unnamed: 0', 'custid', 'trade_no', 'bank_card_no', 'source',
                'id_name', 'latest_query_time', 'loans_latest_time']
# Also drop constant columns: a single unique value cannot discriminate.
# (nunique(dropna=False) matches len(unique()), which counts NaN as a value.)
for col in data_clean.columns:
    if col not in drop_columns and data_clean[col].nunique(dropna=False) == 1:
        drop_columns.append(col)
data_clean = data_clean.drop(drop_columns, axis=1)
# One-hot encode the categorical feature.
data_clean = pd.get_dummies(data_clean, columns=['reg_preference_for_trad'])
# Missing student flag is treated as "not a student".
# Assign back instead of fillna(inplace=True) on a column slice — that is
# chained assignment, deprecated in modern pandas and not guaranteed to
# write through to the frame.
data_clean['student_feature'] = data_clean['student_feature'].fillna(0)
# Fill remaining NaNs with each column's most frequent value.
for col in data_clean.columns:
    mode_value = data_clean[col].value_counts().index[0]
    data_clean[col] = data_clean[col].fillna(mode_value)
# Label
df_y = data_clean['status']
# Features (label removed)
df_X = data_clean.drop(columns=['status'])
# Standardize features column-wise (zero mean, unit variance).
df_X = scale(df_X, axis=0)
# --- Model definitions: default hyper-parameters, fixed seed for reproducibility ---
lr = LogisticRegression(tol=1e-6, random_state=2018)                   # logistic regression
tree = DecisionTreeClassifier(random_state=2018)                       # decision tree
svm = SVC(tol=1e-6, probability=True, random_state=2018)               # support-vector machine
forest = RandomForestClassifier(random_state=2018, n_estimators=100)   # random forest
Gbdt = GradientBoostingClassifier(random_state=2018)                   # gradient-boosted trees (GBDT)
Xgbc = XGBClassifier(random_state=2018)                                # XGBoost
# Evaluation
def muti_score(model):
    """Print 5-fold CV accuracy / precision / recall / F1 / AUC for *model*.

    Reads the module-level ``df_X`` / ``df_y`` prepared above. A single
    ``cross_validate`` call fits each fold once and scores all five metrics,
    instead of refitting the model five separate times with
    ``cross_val_score`` (same splits, same results, ~5x fewer fits).
    """
    # Silence convergence / deprecation noise from the estimators.
    warnings.filterwarnings('ignore')
    scores = cross_validate(
        model, df_X, df_y, cv=5,
        scoring=('accuracy', 'precision', 'recall', 'f1', 'roc_auc'),
    )
    print("準(zhǔn)確率:", scores['test_accuracy'].mean())
    print("精確率:", scores['test_precision'].mean())
    print("召回率:", scores['test_recall'].mean())
    print("F1_score:", scores['test_f1'].mean())
    print("AUC:", scores['test_roc_auc'].mean())
# Map display names to estimator objects explicitly; avoids eval(), which
# executes arbitrary strings and hides the name->object binding.
models = {
    "lr": lr,
    "tree": tree,
    "svm": svm,
    "forest": forest,
    "Gbdt": Gbdt,
    "Xgbc": Xgbc,
}
# Dict insertion order is preserved, so output order matches the original.
for name, model in models.items():
    print(name)
    muti_score(model)
運(yùn)行結(jié)果:
lr
準(zhǔn)確率: 0.790281386338364
精確率: 0.6593920530150161
召回率: 0.3394817341162406
F1_score: 0.448020547401347
AUC: 0.7844017218172162
tree
準(zhǔn)確率: 0.6855215864861635
精確率: 0.3843921937868717
召回率: 0.4199571041805844
F1_score: 0.40104848437486407
AUC: 0.5972291706193749
svm
準(zhǔn)確率: 0.7858656443483919
精確率: 0.7316569883211543
召回率: 0.23137372103653178
F1_score: 0.35083219482550787
AUC: 0.7648360306702487
forest
準(zhǔn)確率: 0.7913335761002059
精確率: 0.7034687227673737
召回率: 0.28838648430083336
F1_score: 0.4078894217688266
AUC: 0.77971546689502
Gbdt
準(zhǔn)確率: 0.7955410050455514
精確率: 0.6673074901039089
召回率: 0.36884427411131815
F1_score: 0.4746696021847946
AUC: 0.789352848120004
Xgbc
準(zhǔn)確率: 0.7959609498788722
精確率: 0.6740591443898865
召回率: 0.3621215850356879
F1_score: 0.47097961303418573
AUC: 0.7906816486380361
Process finished with exit code 0