真假新聞數(shù)據(jù)集下載地址:https://www.kaggle.com/c/fake-news/data#
本文采用LSTM進(jìn)行真假新聞的判別,是二分類任務(wù)。
一、導(dǎo)入包
我的tensorflow、keras版本是2.7.0
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, LSTM, Embedding, Dropout
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import one_hot
import re
from nltk.corpus import stopwords
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from nltk.stem.porter import PorterStemmer
from sklearn.metrics import accuracy_score
from keras.models import load_model
二、讀取數(shù)據(jù)
# Load the training data and separate features from the target label.
df=pd.read_csv('train.csv')
df=df.dropna() ## drop rows containing NaN values
## Get the Independent Features
X=df.drop('label',axis=1) ## X: all columns except the target
y=df['label'] ## y: the target column
### Vocabulary size
voc_size=5000
messages=X.copy()
messages.reset_index(inplace=True) ## reset the index because rows were dropped above
三、文本處理
對(duì)文本進(jìn)行處理。
例如:
"President Obama and President-Elect Donald Trump Meet at White House: Share:" 處理完——>
presid obama presid elect donald trump meet white hous share
# Clean each news title: keep letters only, lowercase, drop English
# stopwords, and reduce the remaining words to their Porter stems.
# FIX: the pasted loop body had lost its indentation (syntax error).
ps = PorterStemmer()
corpus = []
stop_words = set(stopwords.words('english'))  # hoisted: avoid re-reading the list per word
for i in range(0, len(messages)):
    review = re.sub('[^a-zA-Z]', ' ', messages['title'][i])  # keep alphabetic characters only
    review = review.lower()  # normalize case
    review = review.split()  # split on whitespace
    # remove stopwords and stem what remains
    review = [ps.stem(word) for word in review if word not in stop_words]
    review = ' '.join(review)  # back to a single string
    corpus.append(review)
四、格式轉(zhuǎn)化
要將文本數(shù)據(jù)轉(zhuǎn)為深度學(xué)習(xí)的輸入格式
# Encode the cleaned titles as fixed-length integer sequences for the network.
onehot_repr=[one_hot(words,voc_size) for words in corpus] ## hash each word to an integer index in [1, voc_size)
sent_length=20
embedded_docs=pad_sequences(onehot_repr,padding='pre',maxlen=sent_length) ## zero-pad in front so every sequence has length 20
## convert to np.array format
X_final=np.array(embedded_docs)
y_final=np.array(y)
## split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X_final, y_final, test_size=0.33, random_state=42)
五、模型的建立、訓(xùn)練和預(yù)測(cè)
## Keras sequential model
# BUG FIX: this assignment had been fused into the comment line above
# ("##keras序貫?zāi)P?embedding_vector_features=40"), so the name was never
# defined and the Embedding layer below raised NameError.
embedding_vector_features=40  # dimensionality of the learned word embeddings
model=Sequential()
model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_length))
model.add(Dropout(0.3))
model.add(LSTM(100)) ## swap in Bidirectional(LSTM(100)) for a bidirectional LSTM
model.add(Dropout(0.3))
model.add(Dense(1,activation='sigmoid'))  # single sigmoid unit -> probability of class 1
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
## start training
model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64)
## predict: output is a probability in (0,1); closer to 1 means class 1 is more likely
y_pred= model.predict(X_test)
六、模型評(píng)估保存
## Threshold at 0.5: values > 0.5 map to 1, everything else to 0.
# FIX: the pasted function body had lost its indentation (syntax error);
# the manual append loop is also replaced by a comprehension.
def probability_to_intres(data):
    """Convert an iterable of probabilities (e.g. the output of
    model.predict) into a list of hard 0/1 integer labels."""
    return [1 if p > 0.5 else 0 for p in data]
# Turn predicted probabilities into hard labels, evaluate, and persist the model.
res = probability_to_intres(y_pred)
confusion_matrix(y_test,res) ## confusion matrix of true vs. predicted labels
accuracy_score(y_test,res) ## overall accuracy
model.save('my_model.h5') ## save the trained model to disk
七、若新數(shù)據(jù)來了
如果來新數(shù)據(jù)了，判斷是否是虛假新聞
# Score a brand-new headline with the saved model, applying the exact
# same preprocessing pipeline that was used for training.
new_text = "President Obama and President-Elect Donald Trump Meet at White House: Share:"
## preprocess and encode with the same steps as the training data
review1 = re.sub('[^a-zA-Z]', ' ', new_text)  # keep letters only
review1 = review1.lower()
review1 = review1.split()
review1 = [ps.stem(word) for word in review1 if not word in stopwords.words('english')]  # drop stopwords, stem
review1 = ' '.join(review1)
onehot_repr1=[one_hot(review1, voc_size) ]  # wrap in a list: pad_sequences expects a batch
sent_length=20
embedded_docs1=pad_sequences(onehot_repr1,padding='pre',maxlen=sent_length)
X_final1=np.array(embedded_docs1)
model_best = load_model('my_model.h5') ## load the previously saved model
print(model_best.predict(X_final1))  # raw probability
print("最終判斷結(jié)果:", probability_to_intres(model_best.predict(X_final1)))
完整代碼如下
"""Fake-news classification with an LSTM (binary classification on news titles).

Pipeline: load data -> clean/stem titles -> hash-encode and pad ->
train an Embedding+LSTM network -> evaluate -> save -> score a new headline.

FIXES relative to the pasted original: loop and function bodies had lost
their indentation, and the `embedding_vector_features=40` assignment was
fused into a comment line (NameError at the Embedding layer).
"""
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, LSTM, Embedding, Dropout
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import one_hot
import re
from nltk.corpus import stopwords
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from nltk.stem.porter import PorterStemmer
from sklearn.metrics import accuracy_score
from keras.models import load_model

## ---- Load data ----
df=pd.read_csv('train.csv')
df=df.dropna() ## drop rows containing NaN values
## Get the Independent Features
X=df.drop('label',axis=1) ## X: all columns except the target
y=df['label'] ## y: the target column
### Vocabulary size
voc_size=5000
messages=X.copy()
messages.reset_index(inplace=True) ## reset the index because rows were dropped

## ---- Text preprocessing: letters only, lowercase, stopword removal, stemming ----
ps = PorterStemmer()
corpus = []
stop_words = set(stopwords.words('english'))  # hoisted out of the loop
for i in range(0, len(messages)):
    review = re.sub('[^a-zA-Z]', ' ', messages['title'][i])  # keep alphabetic characters only
    review = review.lower()
    review = review.split()
    # remove stopwords and stem what remains
    review = [ps.stem(word) for word in review if word not in stop_words]
    review = ' '.join(review)
    corpus.append(review)

## ---- Encode text for the network ----
onehot_repr=[one_hot(words,voc_size) for words in corpus] ## hash each word to an integer index
sent_length=20
embedded_docs=pad_sequences(onehot_repr,padding='pre',maxlen=sent_length) ## zero-pad in front so lengths match
X_final=np.array(embedded_docs)
y_final=np.array(y)
## split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X_final, y_final, test_size=0.33, random_state=42)

## ---- Build, train, predict ----
## Keras sequential model
embedding_vector_features=40  # FIX: was fused into the comment line in the original paste
model=Sequential()
model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_length))
model.add(Dropout(0.3))
model.add(LSTM(100)) ## swap in Bidirectional(LSTM(100)) for a bidirectional LSTM
model.add(Dropout(0.3))
model.add(Dense(1,activation='sigmoid'))  # probability of class 1
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
## start training
model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64)
## predict: output is a probability; closer to 1 means class 1 is more likely
y_pred= model.predict(X_test)

## ---- Evaluate and save ----
## threshold at 0.5: > 0.5 -> 1, otherwise 0
def probability_to_intres(data):
    """Convert probabilities (e.g. model.predict output) to hard 0/1 labels."""
    return [1 if p > 0.5 else 0 for p in data]

res = probability_to_intres(y_pred)
confusion_matrix(y_test,res) ## confusion matrix
accuracy_score(y_test,res) ## accuracy
model.save('my_model.h5') ## save the trained model

## ---- Score a new headline with the same preprocessing pipeline ----
new_text = "President Obama and President-Elect Donald Trump Meet at White House: Share:"
review1 = re.sub('[^a-zA-Z]', ' ', new_text)
review1 = review1.lower()
review1 = review1.split()
review1 = [ps.stem(word) for word in review1 if word not in stop_words]
review1 = ' '.join(review1)
onehot_repr1=[one_hot(review1, voc_size) ]
sent_length=20
embedded_docs1=pad_sequences(onehot_repr1,padding='pre',maxlen=sent_length)
X_final1=np.array(embedded_docs1)
model_best = load_model('my_model.h5') ## load the previously saved model
print(model_best.predict(X_final1))
print("最終判斷結(jié)果:", probability_to_intres(model_best.predict(X_final1)))