# 先放個(gè)代碼和結(jié)果,改天閑了總結(jié)。
# 用余弦距離計(jì)算相似度以判斷向量化效果
# tf-idf、doc2bow稀疏,適合短文本
# doc2vec效果時(shí)好時(shí)壞,偶然性大,不穩(wěn)
# lsi、lda效果好且較穩(wěn),但lda計(jì)算量偏大
from gensim.models import doc2vec
from gensim import corpora,models
import jieba,os
from gensim.similarities.docsim import Similarity
# Collect every file under the test-data tree into raw_documents, stripping
# all whitespace so each document becomes one continuous string.
raw_documents = []
# Delete spaces, tabs, CR and LF in a single C-level pass (equivalent to the
# chained .replace() calls this supersedes).
_ws_table = str.maketrans('', '', ' \t\r\n')
for root, _dirs, files in os.walk('C:/Users/Administrator/Desktop/testdata/'):
    for file in files:
        # os.path.join is correct whether or not `root` ends with a separator
        # (bare `root+file` broke for files inside subdirectories), and `with`
        # guarantees the handle is closed even if read() raises.
        with open(os.path.join(root, file), encoding='utf8') as f:
            raw_documents.append(f.read().translate(_ws_table))
print('data ok!')
# Tokenize every raw document with jieba.  Two parallel corpora are kept:
# plain token lists for the bow/tf-idf pipeline, and TaggedDocument objects
# (tagged with the document's position) for doc2vec training.
corpora_documents = [list(jieba.cut(text)) for text in raw_documents]
corpora_documents2 = [
    doc2vec.TaggedDocument(words=tokens, tags=[idx])
    for idx, tokens in enumerate(corpora_documents)
]
# Build the token dictionary and the bag-of-words vector corpus.
dictionary = corpora.Dictionary(corpora_documents)
corpus = [dictionary.doc2bow(doc) for doc in corpora_documents]
# Similarity index over the raw bag-of-words corpus.  num_features must cover
# every token id, so size it from the dictionary instead of a hard-coded
# 10000, which silently truncates any vocabulary larger than that.
similarity = Similarity('-Similarity-index', corpus, num_features=len(dictionary))
# Test query
test_data_1 = '周杰倫是個(gè)低調(diào)愛做慈善的好明星'
test_cut_raw_1 = list(jieba.cut(test_data_1))
# Similarity via the raw bow index — left disabled, as in the original:
# test_corpus_1 = dictionary.doc2bow(test_cut_raw_1)
# similarity.num_best = 5
# print('——————————————sim———————————————')
# print(similarity[test_corpus_1])  # returns (index_of_document, similarity) tuples
# Doc2Vec similarity: train a paragraph-vector model on the tagged corpus,
# infer a vector for the query, and rank training documents by similarity.
# NOTE(review): `size`/`iter` and `model.docvecs` are the pre-4.0 gensim API
# (later renamed vector_size / epochs / model.dv) — confirm installed version.
# NOTE(review): min_count=1 keeps every token; on small corpora results are
# noisy, matching the header note that doc2vec is unstable here.
model = doc2vec.Doc2Vec(size=89, min_count=1, iter=10)
model.build_vocab(corpora_documents2)
model.train(corpora_documents2,total_examples=model.corpus_count, epochs=model.iter)
print('——————————————doc2vec———————————————')
# infer_vector is stochastic, so repeated runs can rank documents differently.
inferred_vector = model.infer_vector(test_cut_raw_1)
sims = model.docvecs.most_similar([inferred_vector], topn=5)  # top-5 (tag, score) pairs
print(sims)
# tf-idf weighting of the bag-of-words corpus.
tfidf_model = models.TfidfModel(corpus)
corpus_tfidf = [tfidf_model[doc] for doc in corpus]
# LSI similarity.  The model is trained on tf-idf vectors, so the indexed
# documents must also be projected from tf-idf — the original projected the
# raw bow corpus, mismatching the tf-idf-projected query below.
lsi_topics = 50
lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=lsi_topics)
corpus_lsi = [lsi[doc] for doc in corpus_tfidf]
# An LSI vector has exactly num_topics dimensions, so size the index to match
# (the hard-coded 1600 was unrelated to the 50-dimensional topic space).
similarity_lsi = Similarity('Similarity-Lsi-index', corpus_lsi,
                            num_features=lsi_topics, num_best=5)
test_corpus_3 = dictionary.doc2bow(test_cut_raw_1)  # query -> bow
test_corpus_tfidf_3 = tfidf_model[test_corpus_3]    # bow -> tf-idf
test_corpus_lsi_3 = lsi[test_corpus_tfidf_3]        # tf-idf -> LSI topic space
# lsi.add_documents(test_corpus_lsi_3)  # would fold the query into the model
print('——————————————lsi———————————————')
print(similarity_lsi[test_corpus_lsi_3])
# LDA similarity, mirroring the LSI pipeline above.
# NOTE(review): LDA is conventionally trained on raw term counts; feeding it
# tf-idf weights follows the original code but is unconventional — confirm.
lda_topics = 50
lda = models.LdaModel(corpus_tfidf, id2word=dictionary, num_topics=lda_topics)
# Project the same tf-idf corpus the model was trained on — the original
# projected the raw bow corpus, mismatching the tf-idf-projected query.
corpus_lda = [lda[doc] for doc in corpus_tfidf]
# An LDA vector has at most num_topics dimensions; size the index to match.
similarity_lda = Similarity('Similarity-LDA-index', corpus_lda,
                            num_features=lda_topics, num_best=5)
test_corpus_lda_3 = lda[test_corpus_tfidf_3]  # tf-idf query -> topic space
print('——————————————lda———————————————')
print(similarity_lda[test_corpus_lda_3])
# Quick inspection: the trained LSI model and one projected sample vector.
print(lsi)
print('——————————————向量———————————————')
print(lsi[corpus_tfidf[0]])  # first document mapped into LSI topic space
#print(lsi.print_topics())