Text Classification with FastText
step1. Convert the data to the format FastText requires
import pandas as pd
from sklearn.metrics import f1_score
train_df = pd.read_csv('/Users/summer/Desktop/xul_data/learning/DataWhale/20200719NLP/task01_preparing_20200719/input/train_set.csv', sep='\t', nrows=15000)
# FastText expects each training line to carry the text plus a label prefixed with '__label__'
train_df['label_ft'] = '__label__' + train_df['label'].astype(str)
# hold out the last 5,000 rows for validation; train on the rest
train_df[['text','label_ft']].iloc[:-5000].to_csv('train.csv', index=None, header=None, sep='\t')
step2. Train a FastText classifier
import fasttext
model = fasttext.train_supervised('train.csv', lr=1.0, wordNgrams=2, verbose=2, minCount=1, epoch=25, loss="hs")
# predict returns (labels, probabilities); strip the '__label__' prefix to recover the class id
val_pred = [model.predict(x)[0][0].split('__')[-1] for x in train_df.iloc[-5000:]['text']]
print(f1_score(train_df['label'].values[-5000:].astype(str), val_pred, average='macro'))
Output:
>>> 0.824871229687983
test: increase the sample size to 100k
train_df2 = pd.read_csv('/Users/summer/Desktop/xul_data/learning/DataWhale/20200719NLP/task01_preparing_20200719/input/train_set.csv', sep='\t', nrows=100000)
train_df2['label_ft'] = '__label__' + train_df2['label'].astype(str)
train_df2[['text','label_ft']].iloc[:-5000].to_csv('train.csv', index=None, header=None, sep='\t')
model2 = fasttext.train_supervised('train.csv', lr=1.0, wordNgrams=2, verbose=2, minCount=1, epoch=25, loss="hs")
val_pred2 = [model2.predict(x)[0][0].split('__')[-1] for x in train_df2.iloc[-5000:]['text']]
print(f1_score(train_df2['label'].values[-5000:].astype(str), val_pred2, average='macro'))
Output:
>>> 0.9031919041861232
Homework for this chapter
'''
Todo: 1. Read the documentation to understand roughly what each parameter means and which parameters increase model complexity. 2. Evaluate the model's accuracy on the validation set and determine whether it is overfitting or underfitting.
References:
https://pypi.org/project/fasttext/
https://zhuanlan.zhihu.com/p/66739066
'''
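For Todo item 2, a minimal sketch of an over/underfitting check, reusing model and train_df from step2 above: scoring the same model on its own training rows and on the held-out rows makes the gap visible.
# Compare F1 on the training portion vs. the held-out portion
train_pred = [model.predict(x)[0][0].split('__')[-1] for x in train_df.iloc[:-5000]['text']]
train_f1 = f1_score(train_df['label'].values[:-5000].astype(str), train_pred, average='macro')
val_pred = [model.predict(x)[0][0].split('__')[-1] for x in train_df.iloc[-5000:]['text']]
val_f1 = f1_score(train_df['label'].values[-5000:].astype(str), val_pred, average='macro')
# A large train/validation gap suggests overfitting; low scores on both suggest underfitting.
print('train F1:', train_f1, 'val F1:', val_f1)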
Read the FastText documentation and try adjusting the parameters to get a better score
# Reference implementation adapted from https://zhuanlan.zhihu.com/p/66739066 (kept commented out):
# import os
# import numpy as np
# def train_model(ipt=None, opt=None, model='', dim=100, epoch=5, lr=0.1, loss='softmax'):
#     np.set_printoptions(suppress=True)
#     if os.path.isfile(model):
#         classifier = fasttext.load_model(model)
#     else:
#         classifier = fasttext.train_supervised(ipt, label='__label__', dim=dim, epoch=epoch, lr=lr, wordNgrams=2, loss=loss)
#         """
#         Train a supervised model and return a model object.
#         @param input: path to the training data file
#         @param lr: learning rate
#         @param dim: dimension of the word vectors
#         @param ws: size of the context window (used by the cbow model)
#         @param epoch: number of training epochs
#         @param minCount: minimum word frequency; words below this are filtered out at initialization
#         @param minCountLabel: minimum label frequency; labels below this are filtered out at initialization
#         @param minn: minimum character length when building subwords
#         @param maxn: maximum character length when building subwords
#         @param neg: number of negatives sampled
#         @param wordNgrams: max length of word n-grams
#         @param loss: loss function: softmax; ns (negative sampling); hs (hierarchical softmax)
#         @param bucket: number of buckets used to hash word/character n-grams
#         @param thread: number of threads; each thread processes a slice of the input, and thread 0 prints the loss
#         @param lrUpdateRate: rate of updates for the learning rate
#         @param t: sampling threshold for frequent words
#         @param label: label prefix
#         @param verbose: verbosity level
#         @param pretrainedVectors: path to pretrained word vectors; words found there are not randomly initialized
#         @return model object
#         """
#         classifier.save_model(opt)
#     return classifier
# dim = 100
# lr = 5
# epoch = 5
# model = f'data_dim{str(dim)}_lr0{str(lr)}_iter{str(epoch)}.model'
# classifier = train_model(ipt='data_train.txt',
#                          opt=model,
#                          model=model,
#                          dim=dim, epoch=epoch, lr=0.5)
# result = classifier.test('data_test.txt')
# print(result)
model3 = fasttext.train_supervised('train.csv', lr=1.0, wordNgrams=5, verbose=2, minCount=1, epoch=25, loss="hs")
val_pred3 = [model3.predict(x)[0][0].split('__')[-1] for x in train_df2.iloc[-5000:]['text']]
print(f1_score(train_df2['label'].values[-5000:].astype(str), val_pred3, average='macro'))
Output:
# Baseline: lr=1.0, wordNgrams=2, verbose=2, minCount=1, epoch=25, loss="hs"
## >>> 0.9031919041861232
# Tuned parameters: learning rate, vector dimension, epochs, minimum word count, n-gram size
## lr=5.0, wordNgrams=2, verbose=2, minCount=5, epoch=50, loss="hs"
## >>> 0.8965417666847781
## lr=0.5, wordNgrams=2, dim=128, verbose=2, minCount=5, epoch=50, loss="hs"
## >>> 0.900467944939494
## lr=0.5, wordNgrams=2, dim=64, verbose=2, minCount=5, epoch=50, loss="hs"
## >>> 0.8988977134190188
## lr=1.0, wordNgrams=2, dim=128, verbose=2, minCount=5, epoch=50, loss="hs"
## >>> 0.8994338390877419
## lr=1.0, wordNgrams=2, dim=128, verbose=2, minCount=1, epoch=25, loss="hs"
## >>> 0.9037370211866124
## lr=1.0, wordNgrams=5, verbose=2, minCount=1, epoch=25, loss="hs"
## >>> 0.913202595672358
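The runs above were launched one at a time; a small loop over candidate settings (a hedged sketch, reusing train.csv and the last 5,000 rows of train_df2 as the fixed hold-out; the grid is illustrative) produces the same kind of comparison automatically.
candidates = [
    dict(lr=1.0, wordNgrams=2, minCount=1, epoch=25),
    dict(lr=0.5, wordNgrams=2, dim=128, minCount=5, epoch=50),
    dict(lr=1.0, wordNgrams=5, minCount=1, epoch=25),
]
for params in candidates:
    m = fasttext.train_supervised('train.csv', verbose=2, loss='hs', **params)
    pred = [m.predict(x)[0][0].split('__')[-1] for x in train_df2.iloc[-5000:]['text']]
    print(params, f1_score(train_df2['label'].values[-5000:].astype(str), pred, average='macro'))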
Tune parameters on the validation set to optimize the model
'''
10-fold cross-validation
(1) Hold-out method: randomly split the original data into two groups, one used as the training set and one as the validation set; train the classifier on the training set, then evaluate it on the validation set and record the final classification accuracy.
(2) 10-fold cross-validation: split the dataset into ten parts and, in turn, use 9 parts as training data and 1 part as test data. Each run yields an accuracy (or error rate), and the average over the 10 runs estimates the algorithm's accuracy. Usually the whole procedure is repeated several times (e.g., 10 rounds of 10-fold cross-validation) and the results averaged for a more stable estimate.
(3) Leave-one-out cross-validation (LOOCV): use a single sample from the original data as the validation data and the rest as training data, repeating until every sample has served as validation data once. This is equivalent to K-fold cross-validation with K equal to the number of samples. In some cases efficient algorithms exist, such as kernel regression and Tikhonov regularization.
References:
https://blog.csdn.net/Dream_angel_Z/article/details/47110077
https://blog.csdn.net/Tunnel_/article/details/107614991
'''
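As a rough illustration (not part of the original pipeline), the three strategies map onto standard scikit-learn APIs like this:
from sklearn.model_selection import train_test_split, KFold, LeaveOneOut
# (1) Hold-out: one random split, here 80/20 and stratified by label
X_tr, X_val, y_tr, y_val = train_test_split(
    train_df2['text'], train_df2['label'], test_size=0.2, stratify=train_df2['label'])
# (2) 10-fold: every sample is used for validation exactly once across the ten folds
for tr_idx, val_idx in KFold(n_splits=10, shuffle=True).split(train_df2):
    pass  # train on tr_idx, evaluate on val_idx, average the ten scores
# (3) LOOCV: K-fold with K = number of samples (usually too expensive for 100k rows)
loo = LeaveOneOut()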
Implement the ten-fold split with StratifiedKFold
step1. Split the original data into ten folds, producing ten train/test splits, and save them.
from sklearn.model_selection import StratifiedKFold
print('starting K10 cross-validation data split:')
# train_df = pd.read_csv('data/train_set.csv', sep='\t')
# Stratified sampling: each split keeps the same per-class proportions as the original dataset.
skf = StratifiedKFold(n_splits=10)
for n_fold, (tr_idx, val_idx) in enumerate(skf.split(train_df2['text'], train_df2['label'])):
    print(f'the {n_fold} data split ...')
    tr_x, tr_y, val_x, val_y = train_df2['text'].iloc[tr_idx], train_df2['label'].iloc[tr_idx], train_df2['text'].iloc[val_idx], train_df2['label'].iloc[val_idx]
    tr_y = '__label__' + tr_y.astype(str)
    traindata = pd.DataFrame(list(zip(tr_x.values, tr_y.values)))
    traindata.to_csv(f'/Users/summer/Desktop/xul_data/learning/DataWhale/20200719NLP/task04_dl_fastText_20200726/fasttext_skf10_datasplit/train_split{n_fold}.csv', index=None, header=['text', 'label_ft'], sep='\t')
    testdata = pd.DataFrame(list(zip(val_x.values, val_y.values)))
    testdata.to_csv(f'/Users/summer/Desktop/xul_data/learning/DataWhale/20200719NLP/task04_dl_fastText_20200726/fasttext_skf10_datasplit/test_split{n_fold}.csv', index=None, header=['text', 'label'], sep='\t')
# >>> ...
# the 0 data split ...
# the 1 data split ...
# the 2 data split ...
# the 3 data split ...
# the 4 data split ...
# the 5 data split ...
# the 6 data split ...
# the 7 data split ...
# the 8 data split ...
# the 9 data split ...
step2. Use 10-fold cross-validation for parameter tuning
print('starting K10 cross-validation training:')
val_f1 = []
for n_fold in range(10):
    model = fasttext.train_supervised(f'/Users/summer/Desktop/xul_data/learning/DataWhale/20200719NLP/task04_dl_fastText_20200726/fasttext_skf10_datasplit/train_split{n_fold}.csv', lr=1.0, wordNgrams=2, verbose=2, minCount=1, epoch=25, loss='hs')
    val_df = pd.read_csv(f'/Users/summer/Desktop/xul_data/learning/DataWhale/20200719NLP/task04_dl_fastText_20200726/fasttext_skf10_datasplit/test_split{n_fold}.csv', sep='\t')
    val_pred = [model.predict(x)[0][0].split('__')[-1] for x in val_df['text']]
    val_f1.append(f1_score(val_df['label'].values.astype(str), val_pred, average='macro'))
    print(f'the f1_score of {n_fold} training is:', val_f1[n_fold])
print()
print('The average f1_score is', sum(val_f1)/len(val_f1))
Output:
# Progress: 100.0% words/sec/thread: 2088734 lr: 0.000000 avg.loss: 0.092633 ETA: 0h 0m 0s
# the f1_score of 0 training is: 0.9058600633604391
# Progress: 100.0% words/sec/thread: 2114597 lr: 0.000000 avg.loss: 0.092621 ETA: 0h 0m 0s
# the f1_score of 1 training is: 0.9017374331381326
# Progress: 100.0% words/sec/thread: 2175324 lr: 0.000000 avg.loss: 0.096158 ETA: 0h 0m 0s
# the f1_score of 2 training is: 0.9039268911845869
# Progress: 100.0% words/sec/thread: 2072916 lr: 0.000000 avg.loss: 0.091677 ETA: 0h 0m 0s
# the f1_score of 3 training is: 0.902637362408513
# Progress: 100.0% words/sec/thread: 2137606 lr: 0.000000 avg.loss: 0.094607 ETA: 0h 0m 0s
# the f1_score of 4 training is: 0.9002063369408493
# Progress: 100.0% words/sec/thread: 2022383 lr: 0.000000 avg.loss: 0.091986 ETA: 0h 0m 0s
# the f1_score of 5 training is: 0.9077481432048907
# Progress: 100.0% words/sec/thread: 2183498 lr: 0.000000 avg.loss: 0.093751 ETA: 0h 0m 0s
# the f1_score of 6 training is: 0.9081961005846353
# Progress: 100.0% words/sec/thread: 2033608 lr: 0.000000 avg.loss: 0.094676 ETA: 0h 0m 0s
# the f1_score of 7 training is: 0.8997523965098473
# Progress: 100.0% words/sec/thread: 2114997 lr: 0.000000 avg.loss: 0.093399 ETA: 0h 0m 0s
# the f1_score of 8 training is: 0.9106292751332932
# Progress: 100.0% words/sec/thread: 2104720 lr: 0.000000 avg.loss: 0.092281 ETA: 0h 0m 0s
# the f1_score of 9 training is: 0.8980215563264459
# The average f1_score is 0.9038715558791633
# >>> about 0.07% higher than the original 0.9031919041861232
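Since the loop in step2 evaluates only one fixed configuration, actual tuning would wrap the fold loop in a loop over candidate settings and keep the one with the best mean F1. A hedged sketch (split files as saved above; the candidate grid is illustrative):
split_dir = '/Users/summer/Desktop/xul_data/learning/DataWhale/20200719NLP/task04_dl_fastText_20200726/fasttext_skf10_datasplit'
best_params, best_f1 = None, 0.0
for params in [dict(lr=1.0, wordNgrams=2), dict(lr=1.0, wordNgrams=5)]:
    scores = []
    for n_fold in range(10):
        m = fasttext.train_supervised(f'{split_dir}/train_split{n_fold}.csv', minCount=1, epoch=25, loss='hs', verbose=2, **params)
        fold_df = pd.read_csv(f'{split_dir}/test_split{n_fold}.csv', sep='\t')
        pred = [m.predict(x)[0][0].split('__')[-1] for x in fold_df['text']]
        scores.append(f1_score(fold_df['label'].values.astype(str), pred, average='macro'))
    mean_f1 = sum(scores) / len(scores)  # average over the ten folds
    if mean_f1 > best_f1:
        best_params, best_f1 = params, mean_f1
print('best:', best_params, best_f1)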