import jieba
import numpy as np
import xml.dom.minidom
import random
from gensim.models import Word2Vec
from gensim.corpora.dictionary import Dictionary
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, Flatten, Activation, Dropout
from keras.layers import Conv1D, MaxPooling1D, Embedding, GlobalMaxPooling1D
from keras.models import Model
from keras.models import Sequential
from gensim.models.keyedvectors import KeyedVectors
from keras.callbacks import EarlyStopping,ModelCheckpoint,Callback
np.random.seed(1337)
MAX_SEQUENCE_LENGTH = 70
MAX_NB_WORDS = 600000
EMBEDDING_DIM = 50
VALIDATION_SPLIT = 0.2
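# MAX_SEQUENCE_LENGTH caps each padded weibo at 70 tokens, EMBEDDING_DIM matches the
# 50-dimensional pretrained vectors loaded below, and MAX_NB_WORDS bounds the vocabulary
# kept in the embedding matrix. VALIDATION_SPLIT is defined but never used, since the
# test set loaded later is passed to validation_data directly.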
bwfile = open("badwordlist.txt",encoding='utf8')
bwlist = [line.strip('\n') for line in bwfile]
bwfile.close()
for bw in bwlist:
    jieba.add_word(bw)
emo = ['like','fear','disgust','anger','surprise','sadness','happiness','none']
smallemo= ['angry/disgusted','happy/like','sad','afraid/surprised','other']
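# emo lists the eight fine-grained emotion tags found in the training XML; smallemo is the
# coarser label set actually predicted. The trailing 'other' class is kept in the list for
# indexing, but it is excluded downstream: the test files are the "no other" variants and
# the network ends in a 4-way softmax.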
'''
stop_words = ["的", "一", "不", "在", "人", "有", "是", "為", "以", "于", "上", "他", "而",
"后", "之", "來", "及", "了", "因", "下", "可", "到", "由", "這", "與", "也",
"此", "但", "并", "個", "其", "已", "無", "小", "我", "們", "起", "最", "再",
"今", "去", "好", "只", "又", "或", "很", "亦", "某", "把", "那", "你", "乃",
"它","要", "將", "應(yīng)", "位", "新", "兩", "中", "更", "我們", "自己", "沒有", "“", "”",
"针饥,", "(", ")", " ",'[',']',' ','~','祥得。','!',':','伤柄、','/','…']
'''
stop_words = []
sw = open('stopwords.txt', encoding='utf8')
for line in sw:
    line = line.strip("\n")
    stop_words.append(line)
sw.close()
def segmentWord(cont):
    c = []
    for i in cont:
        a = list(jieba.cut(i, cut_all=True))
        b = " ".join(a)
        c.append(b)
    return c
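# segmentWord runs jieba in full mode (cut_all=True), which emits every dictionary word it
# can find in a span, then rejoins the tokens with spaces so each weibo becomes a
# whitespace-separated string; the bad-word list added above keeps those terms as single tokens.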
def text_to_index_array(p_new_dic, p_sen):  # convert tokenized texts to sequences of word indices
    new_sentences = []
    for sen in p_sen:
        new_sen = []
        for word in sen:
            try:
                new_sen.append(p_new_dic[word])  # map each word to its index
            except KeyError:
                new_sen.append(0)  # words missing from the index dictionary map to 0
        new_sentences.append(new_sen)
    return new_sentences
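# Note that index 0 doubles as both "unknown word" and the value pad_sequences later uses
# for padding, so out-of-vocabulary tokens and padding are indistinguishable to the model;
# row 0 of the embedding matrix stays all zeros either way.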
def readTrain(filename):
    DOMTree = xml.dom.minidom.parse(filename)
    collection = DOMTree.documentElement
    weibos = collection.getElementsByTagName("weibo")
    traindata = []
    trainlabel = []
    data = [[], [], [], [], [], [], [], []]
    for weibo in weibos:
        sentence = weibo.getElementsByTagName('sentence')
        for e in sentence:
            sen = e.childNodes[0].data
            if e.getAttribute('opinionated') == 'Y':
                emotion1 = e.getAttribute('emotion-1-type')
                emotion2 = e.getAttribute('emotion-2-type')
                data[emo.index(emotion1)].append(sen)
                if emotion2 != 'none':
                    data[emo.index(emotion2)].append(sen)
            else:
                data[7].append(sen)
    #smalldata = [[],[],[],[],[]]
    smalldata = [[], [], [], []]
    smalldata[0] = data[2] + data[3]  # disgust + anger   -> angry/disgusted
    smalldata[1] = data[0] + data[6]  # like + happiness  -> happy/like
    smalldata[2] = data[5]            # sadness           -> sad
    smalldata[3] = data[1] + data[4]  # fear + surprise   -> afraid/surprised
    smalldata[3] *= 2                 # duplicate the smallest class to rebalance
    #smalldata[4] = data[7]
    for i, d in enumerate(smalldata):
        for ele in d:
            traindata.append(ele)
            trainlabel.append(i)
    return traindata, trainlabel
traindata,label = readTrain("Training data for Emotion Classification.xml")
#traindata,label = adjustData(traindata,label)
traindata = segmentWord(traindata)
testdata = []
tfile = open('withoutother.txt',encoding='utf8')
for line in tfile:
    testdata.append(line)
tfile.close()
testdata = segmentWord(testdata)
testlabel = []
labelfile = open('testlabelnoother.txt')
for l in labelfile:
    line = l.strip('\n')
    testlabel.append(smallemo.index(line))
labelfile.close()
traintexts = [[word for word in document.split() if word not in stop_words] for document in traindata]
testtexts = [[word for word in document.split() if word not in stop_words] for document in testdata]
word_vectors = KeyedVectors.load_word2vec_format('zhwiki_2017_03.sg_50d.word2vec', binary=False)
#word_vectors = Word2Vec(traintexts+testtexts, size=EMBEDDING_DIM, window=5, min_count=1)
#word_vectors.wv.save_word2vec_format('smallwv.txt',binary=False)
#word_vectors = KeyedVectors.load_word2vec_format('smallwv.txt', binary=False)
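# The file name suggests zhwiki_2017_03.sg_50d.word2vec is a 50-dimensional skip-gram model
# trained on a 2017 Chinese Wikipedia dump, stored in the plain-text word2vec format; the
# commented lines show the alternative of training a small Word2Vec model on this corpus itself.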
gensim_dict = Dictionary()
gensim_dict.doc2bow(word_vectors.vocab.keys(), allow_update=True)
w2indx = {v: k + 1 for k, v in gensim_dict.items()}  # word indices, numbered from 1
w2vec = {word: word_vectors[word] for word in w2indx.keys()}
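# gensim's Dictionary assigns each vocabulary word an integer id; shifting by +1 reserves
# index 0 for padding/unknown words, so w2indx maps word -> row of the embedding matrix and
# w2vec maps word -> its pretrained vector.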
trainseq = text_to_index_array(w2indx, traintexts)
testseq = text_to_index_array(w2indx, testtexts)
traindata = pad_sequences(trainseq, maxlen=MAX_SEQUENCE_LENGTH)
testdata = pad_sequences(testseq, maxlen=MAX_SEQUENCE_LENGTH)
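# pad_sequences pre-pads (and pre-truncates) every index sequence to exactly
# MAX_SEQUENCE_LENGTH, producing the fixed-size integer matrices the Embedding layer expects.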
word_index = w2indx
print('Found %s unique tokens.' % len(word_index))
labels = to_categorical(np.asarray(label))
testlabels = to_categorical(np.asarray(testlabel))
indices = np.arange(traindata.shape[0])
np.random.shuffle(indices)
traindata = traindata[indices]
labels = labels[indices]
x_train = traindata[:]   # training set
y_train = labels[:]      # training labels
x_val = testdata[:]      # test set (named "val" here, but it is really the held-out test set)
y_val = testlabels[:]
print('Preparing embedding matrix.')
nb_words = min(MAX_NB_WORDS, len(word_index))
embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
for word, i in word_index.items():
    if i > MAX_NB_WORDS:
        continue
    embedding_vector = w2vec.get(word)
    if embedding_vector is not None:
        # words not found in the embedding index stay all-zeros
        embedding_matrix[i] = embedding_vector
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
embedding_layer = Embedding(nb_words + 1,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)
print('Training model.')
model = Sequential()
model.add(embedding_layer)
model.add(Conv1D(256, 4, padding='valid', activation='relu', strides=1))
model.add(GlobalMaxPooling1D())
#model.add(Dropout(0.2))
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(4))
model.add(Activation('softmax'))
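# The network is a standard 1-D text CNN: frozen pretrained embeddings -> Conv1D with 256
# filters of width 4 -> global max pooling -> two small ReLU/Dropout dense blocks -> a 4-way
# softmax over the coarse emotion classes. model.summary() can be called here to inspect shapes.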
# compile with categorical cross-entropy; rmsprop is used here, but other optimizers (e.g. adadelta) also work
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
# start training; a higher number of epochs can improve the fit, but training gets slower
#early_stopping =EarlyStopping(monitor='val_loss', patience=2)
#checkpointer = ModelCheckpoint(filepath='weights.hdf5', verbose=1, save_best_only=True)
model.fit(x_train, y_train,batch_size=64,epochs=4,validation_data=(x_val,y_val))
pre = model.predict_classes(x_val,verbose=0,batch_size=64)
prelabel = []
predlabelfile = open("predfile.txt",'w+')
for p in pre:
    predlabelfile.write(str(smallemo[p]) + '\n')
predlabelfile.close()
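# The script only writes the predicted labels to predfile.txt. A minimal scoring sketch,
# assuming the test texts and testlabel entries are aligned line-by-line:
pre = np.asarray(pre)
true = np.asarray(testlabel)
print('test accuracy: %.4f' % np.mean(pre == true))
for c, name in enumerate(smallemo[:4]):
    mask = true == c
    if mask.any():
        print('%s recall: %.4f' % (name, np.mean(pre[mask] == c)))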