Smart Bidding
Two algorithms are used for smart bidding here, but the overall idea is the same: first use a classifier to screen out listings that are likely to lose money, then use a regressor to predict the actual return of investing in each remaining listing.
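Before going through the actual code, here is a minimal sketch of that two-stage idea. It assumes a feature matrix X, a default label y_default (1 means the listing lost money) and an annualized-return target y_roi already exist; the parameter values and the regression objective are illustrative, not the settings used below.
# Two-stage sketch (assumed inputs: X, y_default, y_roi; parameters are illustrative).
import xgboost as xgb

# Stage 1: a classifier that flags listings likely to lose money.
dtrain_clf = xgb.DMatrix(X.values, y_default.values)
clf = xgb.train({'objective': 'binary:logistic', 'eval_metric': 'error', 'max_depth': 6, 'eta': 0.1},
                dtrain_clf, num_boost_round=200)

# Keep only listings whose predicted default probability is below a chosen threshold.
p_default = clf.predict(xgb.DMatrix(X.values))
keep = p_default < 0.02  # the threshold is a free parameter

# Stage 2: a regressor, trained on the surviving listings, predicts the annualized return.
dtrain_reg = xgb.DMatrix(X.values[keep], y_roi.values[keep])
reg = xgb.train({'objective': 'reg:squarederror', 'eval_metric': 'rmse', 'max_depth': 6, 'eta': 0.1},
                dtrain_reg, num_boost_round=200)  # use 'reg:linear' on very old xgboost versions

# At bidding time: filter candidates with the classifier, then rank them by predicted return.
predicted_roi = reg.predict(xgb.DMatrix(X.values[keep]))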
The code is as follows:
First come the package imports. I pretty much always import pandas, numpy and the plotting libraries, and I always apply the settings below so that plots display nicely, so they could just as well be written straight into a configuration file; see the earlier post on configuring your Jupyter (配置你的jupyter).
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import plotly
plotly.offline.init_notebook_mode(connected=True)
import plotly.plotly as py
import plotly.graph_objs as go
%matplotlib inline
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn import linear_model, preprocessing
from IPython.display import display
pd.set_option('display.max_columns', 9999)
pd.set_option('display.float_format', lambda x: '%.4f' % x)
sns.set_style("darkgrid",{"font.sans-serif":['simhei', 'Arial']}) # for chinese display
import matplotlib as mpl
mpl.rcParams['axes.unicode_minus'] = False # for minus display
import datetime
import xgboost as xgb
The next step is to read the data and process it:
1) Handling datetime fields and joining the two tables
df_lc = pd.read_csv('./LC.csv')
df_lc['借款成功日期'] = pd.to_datetime(df_lc['借款成功日期'])
df_lp = pd.read_csv('./LP.csv')
df_lp['到期日期'] = pd.to_datetime(df_lp['到期日期'])
df_lp['還款日期'] = pd.to_datetime(df_lp['還款日期'].str.replace('\\\\N', '2099-12-31'))
df_lp['還款狀態(tài)'] = df_lp['還款狀態(tài)'].astype(int)
df_lp = df_lp[df_lp['到期日期'] < '2016-11-01'] # the latest recorddate is 2017-02-22, so keeping only listings that matured before 2016-11-01 is appropriate for computing a three-month overdue rate
df_lc = df_lc[df_lc.ListingId.isin(df_lp.ListingId.unique())]
expected_pp = df_lp.groupby('ListingId')['應(yīng)還本金'].sum()
ind_consistent = np.isclose(df_lc[['ListingId', '借款金額']].set_index('ListingId')['借款金額'], expected_pp)
df_lc = df_lc[df_lc.ListingId.isin(expected_pp[ind_consistent].index)]
df_lp = df_lp[df_lp.ListingId.isin(expected_pp[ind_consistent].index)]
expected_ret = df_lp.groupby('ListingId')[['應(yīng)還本金', '應(yīng)還利息']].sum().sum(axis=1)
pp = df_lc[['ListingId', '借款金額']].set_index('ListingId')['借款金額']
real_ret = expected_ret - df_lp.groupby('ListingId')[['剩余本金', '剩余利息']].sum().sum(axis=1)
real_ROI = real_ret / pp
df_lc = pd.merge(df_lc, real_ROI.rename('real_ROI').to_frame(), how='left', left_on='ListingId', right_index=True)
# annualized ROI
df_lc['real_ROI_per_year'] = ((df_lc['real_ROI'] - 1) / df_lc['借款期限']) * 12
df_lc.head()
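As a sanity check on the return definition, here is a toy example with made-up numbers:
# Toy example (made-up numbers): a 1000-yuan listing with a 12-month term that
# eventually pays back 1100 yuan has real_ROI = 1.1, i.e. an annualized ROI of 10%.
principal, term_months, total_received = 1000.0, 12, 1100.0
real_ROI = total_received / principal                    # 1.1
real_ROI_per_year = (real_ROI - 1) / term_months * 12    # 0.10
print(real_ROI, real_ROI_per_year)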
2) Encoding categorical variables
X['初始評(píng)級(jí)'] = preprocessing.LabelEncoder().fit_transform(X['初始評(píng)級(jí)'])
X = pd.concat([X, pd.get_dummies(X['借款類型'], prefix='借款類型')], axis=1).drop('借款類型', axis=1)
X['是否首標(biāo)'] = preprocessing.LabelEncoder().fit_transform(X['是否首標(biāo)'])
X['性別'] = preprocessing.LabelEncoder().fit_transform(X['性別'])
X['手機(jī)認(rèn)證'] = np.where(X['手機(jī)認(rèn)證'] == '未成功認(rèn)證', 0, 1)
X['戶口認(rèn)證'] = np.where(X['戶口認(rèn)證'] == '未成功認(rèn)證', 0, 1)
X['視頻認(rèn)證'] = np.where(X['視頻認(rèn)證'] == '未成功認(rèn)證', 0, 1)
X['學(xué)歷認(rèn)證'] = np.where(X['學(xué)歷認(rèn)證'] == '未成功認(rèn)證', 0, 1)
X['征信認(rèn)證'] = np.where(X['征信認(rèn)證'] == '未成功認(rèn)證', 0, 1)
X['淘寶認(rèn)證'] = np.where(X['淘寶認(rèn)證'] == '未成功認(rèn)證', 0, 1)
X.head()
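One thing to keep in mind: LabelEncoder and get_dummies are fit here on all of X (presumably the feature DataFrame built from df_lc). At bidding time the same encoders and dummy columns have to be reused, otherwise fresh listings can end up encoded differently. A minimal sketch, where X_raw and new_listings are hypothetical names for the un-encoded training features and for incoming listings:
# Fit the encoders once on the training features and keep them for reuse
# (X_raw and new_listings are hypothetical names; unseen labels would raise in transform).
encoders = {col: preprocessing.LabelEncoder().fit(X_raw[col])
            for col in ['初始評(píng)級(jí)', '是否首標(biāo)', '性別']}
X_new_enc = new_listings.copy()
for col, enc in encoders.items():
    X_new_enc[col] = enc.transform(X_new_enc[col])

# For the one-hot column: reindex the new dummies to the training dummy columns, filling gaps with 0.
dummy_cols = pd.get_dummies(X_raw['借款類型'], prefix='借款類型').columns
new_dummies = pd.get_dummies(X_new_enc['借款類型'], prefix='借款類型').reindex(columns=dummy_cols, fill_value=0)
X_new_enc = pd.concat([X_new_enc.drop('借款類型', axis=1), new_dummies], axis=1)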
3) Feature discretization
def add_ends_to_bins(bins):
    return np.append(np.insert(bins, 0, -np.inf), np.inf)
dict_s_fea_dct = {}
s_amount_C = df_lc_C['借款金額']
q = [0.05, 0.2, 0.5, 0.8, 0.95]
bins = s_amount_C.quantile(q)
s_fea_dct_amount_C = pd.cut(s_amount_C, add_ends_to_bins(bins.values))
dict_s_fea_dct['amount'] = s_fea_dct_amount_C
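df_lc_C is not defined in the snippets above; presumably it is a subset of df_lc (for example, listings of one credit grade). The same quantile binning can be applied to other numeric columns and then used to inspect loss rates per bucket; a sketch working directly on df_lc, with an illustrative column list:
# Quantile-based discretization for several numeric columns (the column list is illustrative).
q = [0.05, 0.2, 0.5, 0.8, 0.95]
for col, key in [('借款金額', 'amount'), ('借款利率', 'rate'), ('年齡', 'age')]:
    bins = df_lc[col].quantile(q)
    dict_s_fea_dct[key] = pd.cut(df_lc[col], add_ends_to_bins(bins.values))

# The buckets make it easy to look at, e.g., the share of loss-making listings per amount bucket.
df_lc.groupby(dict_s_fea_dct['amount'].values)['real_ROI_per_year'].apply(lambda s: (s < 0).mean())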
This can be illustrated with a plot:
x_quantile = np.arange(0.01, 1., 0.01)
df_lc['real_ROI_per_year'].quantile(x_quantile)
plt.plot(x_quantile, df_lc['real_ROI_per_year'].quantile(x_quantile));
First train the binary classification model:
y = df_lc.real_ROI_per_year < 0
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42, stratify=y)
xgb_params = {
'eta': 0.01,
'max_depth': 13,
'subsample': 0.7,
'colsample_bytree': 0.7,
'scale_pos_weight': (y_train == 0).sum() / (y_train == 1).sum(),
'max_delta_step': 1,
'objective': 'binary:logistic',
'eval_metric': 'error',
'silent': 1,
'shuffle': True,  # note: not an XGBoost booster parameter, so xgb.cv / xgb.train will simply ignore it
}
dtrain = xgb.DMatrix(X_train.values, y_train.values)
dtest = xgb.DMatrix(X_test.values, y_test.values)
cv_output = xgb.cv(xgb_params, dtrain, num_boost_round=10000, early_stopping_rounds=20,
verbose_eval=50, show_stdv=False)
cv_output[['train-error-mean', 'test-error-mean']].plot();
Retrain with the number of boosting rounds found by cross-validation:
num_boost_rounds = len(cv_output)
model = xgb.train(dict(xgb_params, silent=0), dtrain, num_boost_round=num_boost_rounds)
Save the model:
model.save_model('xgb_clf_first_step_maxdepth_13_for_default.model')
Load the model (if it was saved earlier, the training steps above can be skipped):
model_for_default = xgb.Booster({'nthread': 4})  # init an empty booster
model_for_default.load_model('xgb_clf_first_step_maxdepth_13_for_default.model')  # load the saved model
Inspect feature importance:
fig, ax = plt.subplots(1, 1, figsize=(8, 13))
xgb.plot_importance(model_for_default, height=0.5, ax=ax);
fscore = model_for_default.get_fscore()
s_fscore = pd.Series({int(k.lstrip('f')): v for k, v in fscore.items()})
s_fscore.index = X.columns[s_fscore.index]  # map f0, f1, ... back to the corresponding column names
s_fscore.sort_values(ascending=False)
model_for_default.predict(dtest)
confusion_matrix(y_test, model_for_default.predict(dtest) > 0.02)
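The 0.02 cut-off used above is a choice rather than something derived here; one way to sanity-check it is to look at precision and recall over a range of thresholds, using the y_test / dtest objects defined above:
from sklearn.metrics import precision_recall_curve

# Predicted default probabilities on the held-out set.
p_test = model_for_default.predict(dtest)

# Precision / recall as a function of the decision threshold, to justify or adjust the 0.02 cut-off.
precision, recall, thresholds = precision_recall_curve(y_test, p_test)
plt.plot(thresholds, precision[:-1], label='precision')
plt.plot(thresholds, recall[:-1], label='recall')
plt.xlabel('threshold')
plt.legend();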