Computing Document Similarity with gensim

gensim official tutorials: https://radimrehurek.com/gensim/tutorial.html

Train four kinds of models (tfidf, lsi, lda, doc2vec) to vectorize documents.

The input file has two tab-separated columns: title \t tokens (space-separated segmented words).
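
For illustration, a hypothetical two-line input file (GBK-encoded; a tab separates the two columns, spaces separate the tokens):

机器学习入门	机器 学习 入门
文档相似度计算	文档 相似度 计算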

do_train_model.py trains the models (the scripts below are written for Python 2 and an older gensim API):

#! /usr/bin/env python
#encoding: utf-8

import sys
import os
import re
import logging
import time
from six import iteritems
from gensim import corpora, models, similarities
from gensim.models.doc2vec import LabeledSentence # deprecated in newer gensim; use TaggedDocument there

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
        filename='1.log',
        filemode='w',
        level=logging.INFO)

g_charset='gbk'
g_pattern = re.compile(' +')

class MyCorpus(object):
    '''stream one tokenized document per line from a "title \t tokens" file'''
    def __init__(self, fname):
        self.fname = fname
    def __iter__(self):
        for line in open(self.fname):
            s = line.rstrip('\n').split('\t')
            yield g_pattern.split(s[1].decode(g_charset)) # format: title \t tokens

class MyLabelCorpus(object):
    '''stream LabeledSentence objects, tagging each document with its line number'''
    def __init__(self, fname):
        self.fname = fname
    def __iter__(self):
        for i, line in enumerate(open(self.fname)):
            s = line.rstrip('\n').split('\t')
            yield LabeledSentence(words=s[1].decode(g_charset, 'ignore').split(), tags=[i])

def train_tfidf(corpus, dictionary, model_file, vec_file):
    '''
    train tfidf model
    '''
    tfidf = models.TfidfModel(corpus)
    tfidf.save(model_file)
    corpus_tfidf = tfidf[corpus]
    corpora.SvmLightCorpus.serialize(vec_file, corpus_tfidf) # unserialize: corpora.SvmLightCorpus(vec_file)

def train_lsi(corpus, dictionary, model_file, vec_file):
    '''
    train lsi model
    '''
    tfidf = models.TfidfModel(corpus)
    corpus_tfidf = tfidf[corpus]
    lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=100)
    lsi.save(model_file)
    corpus_new = lsi[corpus_tfidf]
    corpora.SvmLightCorpus.serialize(vec_file, corpus_new)

def train_lda(corpus, dictionary, model_file, vec_file):
    '''
    train lda model
    '''
    #tfidf = models.TfidfModel(corpus)
    #corpus_tfidf = tfidf[corpus]

    # LdaModel can be used instead of LdaMulticore for single-process training
    lda = models.LdaMulticore(corpus, id2word=dictionary, num_topics=100, chunksize=2000,
            passes=50, iterations=50, eval_every=None, workers=8)
    lda.save(model_file)
    corpus_new = lda[corpus]
    corpora.SvmLightCorpus.serialize(vec_file, corpus_new)

def save_svmlight_format(docvecs, outfile):
    '''dump dense doc2vec vectors in svmlight format (dummy label 0, 1-based feature ids)'''
    fout = open(outfile, 'w')
    for t in docvecs:
        a = []
        for i, v in enumerate(t):
            a.append("%d:%.6f" % (i + 1, v))
        fout.write("0 %s\n" % " ".join(a))
    fout.close()

def train_doc2vec(infile, model_file, vec_file):
    '''
    train doc2vec model
    '''
    corp = MyLabelCorpus(infile)
    # note: gensim >= 4.0 renames size -> vector_size and iter -> epochs
    model = models.Doc2Vec(corp, size=100, window=5, min_count=3, workers=12,
            hs=1, negative=0, dbow_words=1, iter=40)
    model.save(model_file)
    save_svmlight_format(model.docvecs, vec_file)

def read_stop_file(stop_file):
    stoplist = []
    if os.path.isfile(stop_file):
        with open(stop_file) as f:
            stoplist = [w.strip().decode(g_charset, 'ignore') for w in f]
    return stoplist

def read_corpus(infile):
    '''
    read corpus file and filter words
    '''
    corp = MyCorpus(infile)
    dictionary = corpora.Dictionary(corp)

    stop_file = 'stopwords.txt'
    stoplist = read_stop_file(stop_file)

    stop_ids = [dictionary.token2id[stopword] for stopword in stoplist
            if stopword in dictionary.token2id]
    once_ids = [tokenid for tokenid, docfreq in iteritems(dictionary.dfs) if docfreq <= 1]
    print "stop_ids: ",len(stop_ids)
    print "once_ids: ",len(once_ids)
    dictionary.filter_tokens(stop_ids + once_ids)
    dictionary.compactify()

    print "uniq tokens:", len(dictionary)

    corpus = [dictionary.doc2bow(text) for text in corp]
    return corpus, dictionary

def train_model(infile, tag):
    '''
    train the chosen model to vectorize the documents
    '''
    valid_tags = set(["tfidf", 'lsi', 'lda', 'doc2vec'])
    if tag not in valid_tags:
        print "wrong tag: %s" % tag
        return

    ts = time.time()
    prefix = "%s.%s" % (infile, tag)
    model_file = prefix + ".model"
    vec_file = prefix + ".vec"

    if tag == 'doc2vec':
        train_doc2vec(infile, model_file, vec_file)
    else:
        corpus, dictionary = read_corpus(infile)

        if tag == 'tfidf':
            train_tfidf(corpus, dictionary, model_file, vec_file)
        elif tag == 'lsi':
            train_lsi(corpus, dictionary, model_file, vec_file)
        elif tag == 'lda':
            train_lda(corpus, dictionary, model_file, vec_file)

    ts2 = time.time()
    cost = int(ts2-ts)
    print "cost_time:\t%s\t%s\t%d" % (infile, tag, cost)

if __name__ == '__main__':
    if len(sys.argv) != 3:
        print "Usage: %s <infile> <tag>" % __file__
        print "\t train different model to vecterize the document"
        print "<tag>: tfidf, lsi, lda, doc2vec"
        sys.exit(-1)

    infile = sys.argv[1]
    tag = sys.argv[2]
    train_model(infile, tag)
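
A minimal usage sketch, assuming a hypothetical GBK-encoded corpus file named corpus.txt; output file names are derived from the input name plus the tag, as in the train_model code above:

python do_train_model.py corpus.txt tfidf   # writes corpus.txt.tfidf.model and corpus.txt.tfidf.vec
python do_train_model.py corpus.txt lda     # writes corpus.txt.lda.model and corpus.txt.lda.vec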

do_query_simi.py queries for similar documents:

#! /usr/bin/env python
#encoding: utf-8

import sys
import os
import logging
from gensim import corpora, models, similarities

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
        filename='1.log',
        filemode='w',
        level=logging.INFO)

def read_doc_file(infile):
    '''
    read doc file
    format: title \t tokens
    '''
    docs = []
    for line in open(infile):
        s = line.rstrip('\n').split('\t')
        docs.append(s[0])
    return docs

def get_feature_num(corpus_semantic):
    '''
    return the feature count: the largest feature id in the corpus + 1
    '''
    max_index = -1
    for v in corpus_semantic:
        if not v: # skip empty documents; max() would fail on an empty list
            continue
        max_cur = max(t[0] for t in v)
        if max_cur > max_index:
            max_index = max_cur
    return max_index + 1

def query_simi(infile, tag):
    '''
    query similar documents based on trained document vectors
    '''
    valid_tags = set(["tfidf", 'lsi', 'lda', 'doc2vec'])
    if tag not in valid_tags:
        print "wrong tag: %s" % tag
        return

    prefix = "%s.%s" % (infile, tag)
    vec_file = prefix + ".vec"
    index_file = vec_file + ".index"

    index = None
    corpus_semantic = corpora.SvmLightCorpus(vec_file)
    n = get_feature_num(corpus_semantic)
    print "feature num:", n
    if os.path.isfile(index_file):
        if tag == 'tfidf':
            index = similarities.SparseMatrixSimilarity.load(index_file)
        else:
            index = similarities.MatrixSimilarity.load(index_file)
    else:
        if tag == 'tfidf':
            index = similarities.SparseMatrixSimilarity(corpus_semantic, num_features = n)
        else:
            index = similarities.MatrixSimilarity(corpus_semantic)
        index.save(index_file)

    # read file
    docs = read_doc_file(infile)
    doc_map = {}
    for i,doc in enumerate(docs):
        doc_map[doc] = i

    # query
    topN = 10
    corpus_semantic = list(corpus_semantic)
    while True:
        query = raw_input("\ninput query: ").strip()
        if query == 'q' or query == 'quit':
            break
        q = doc_map.get(query, -1)
        if q == -1:
            print "doc not found: %s" % query
            continue
        print q
        #print "query_doc: %s" % docs[q]
        sims = index[corpus_semantic[q]]
        sims = sorted(enumerate(sims), key=lambda item: -item[1])
        for k, v in sims[:topN]:
            print "%.3f\t%s" % (v, docs[k])

if __name__ == '__main__':
    if len(sys.argv) != 3:
        print "Usage: %s <infile> <tag>" % __file__
        print "<tag>: tfidf, lsi, lda, doc2vec"
        sys.exit(-1)

    infile = sys.argv[1]
    tag = sys.argv[2]
    query_simi(infile, tag)
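
A minimal interactive sketch, again with the hypothetical corpus.txt trained under the lda tag; the query must be an exact title from the first column, and q or quit exits (values in angle brackets are placeholders):

python do_query_simi.py corpus.txt lda
feature num: 100

input query: <an exact title from column 1>
<doc index>
<score>	<title of a similar document>
...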
最后編輯于
?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請聯(lián)系作者
  • 序言:七十年代末贰逾,一起剝皮案震驚了整個濱河市槐沼,隨后出現(xiàn)的幾起案子吕粗,更是在濱河造成了極大的恐慌米愿,老刑警劉巖宗兼,帶你破解...
    沈念sama閱讀 218,640評論 6 507
  • 序言:濱河連續(xù)發(fā)生了三起死亡事件爵憎,死亡現(xiàn)場離奇詭異把跨,居然都是意外死亡们拙,警方通過查閱死者的電腦和手機,發(fā)現(xiàn)死者居然都...
    沈念sama閱讀 93,254評論 3 395
  • 文/潘曉璐 我一進(jìn)店門劫瞳,熙熙樓的掌柜王于貴愁眉苦臉地迎上來倘潜,“玉大人,你說我怎么就攤上這事志于′桃颍” “怎么了?”我有些...
    開封第一講書人閱讀 165,011評論 0 355
  • 文/不壞的土叔 我叫張陵伺绽,是天一觀的道長养泡。 經(jīng)常有香客問我,道長奈应,這世上最難降的妖魔是什么澜掩? 我笑而不...
    開封第一講書人閱讀 58,755評論 1 294
  • 正文 為了忘掉前任,我火速辦了婚禮杖挣,結(jié)果婚禮上肩榕,老公的妹妹穿的比我還像新娘。我一直安慰自己惩妇,他們只是感情好株汉,可當(dāng)我...
    茶點故事閱讀 67,774評論 6 392
  • 文/花漫 我一把揭開白布筐乳。 她就那樣靜靜地躺著,像睡著了一般乔妈。 火紅的嫁衣襯著肌膚如雪蝙云。 梳的紋絲不亂的頭發(fā)上,一...
    開封第一講書人閱讀 51,610評論 1 305
  • 那天路召,我揣著相機與錄音勃刨,去河邊找鬼。 笑死优训,一個胖子當(dāng)著我的面吹牛朵你,可吹牛的內(nèi)容都是我干的。 我是一名探鬼主播揣非,決...
    沈念sama閱讀 40,352評論 3 418
  • 文/蒼蘭香墨 我猛地睜開眼抡医,長吁一口氣:“原來是場噩夢啊……” “哼!你這毒婦竟也來了早敬?” 一聲冷哼從身側(cè)響起忌傻,我...
    開封第一講書人閱讀 39,257評論 0 276
  • 序言:老撾萬榮一對情侶失蹤,失蹤者是張志新(化名)和其女友劉穎搞监,沒想到半個月后水孩,有當(dāng)?shù)厝嗽跇淞掷锇l(fā)現(xiàn)了一具尸體,經(jīng)...
    沈念sama閱讀 45,717評論 1 315
  • 正文 獨居荒郊野嶺守林人離奇死亡琐驴,尸身上長有42處帶血的膿包…… 初始之章·張勛 以下內(nèi)容為張勛視角 年9月15日...
    茶點故事閱讀 37,894評論 3 336
  • 正文 我和宋清朗相戀三年俘种,在試婚紗的時候發(fā)現(xiàn)自己被綠了。 大學(xué)時的朋友給我發(fā)了我未婚夫和他白月光在一起吃飯的照片绝淡。...
    茶點故事閱讀 40,021評論 1 350
  • 序言:一個原本活蹦亂跳的男人離奇死亡宙刘,死狀恐怖,靈堂內(nèi)的尸體忽然破棺而出牢酵,到底是詐尸還是另有隱情悬包,我是刑警寧澤,帶...
    沈念sama閱讀 35,735評論 5 346
  • 正文 年R本政府宣布馍乙,位于F島的核電站布近,受9級特大地震影響,放射性物質(zhì)發(fā)生泄漏丝格。R本人自食惡果不足惜撑瞧,卻給世界環(huán)境...
    茶點故事閱讀 41,354評論 3 330
  • 文/蒙蒙 一、第九天 我趴在偏房一處隱蔽的房頂上張望显蝌。 院中可真熱鬧季蚂,春花似錦、人聲如沸琅束。這莊子的主人今日做“春日...
    開封第一講書人閱讀 31,936評論 0 22
  • 文/蒼蘭香墨 我抬頭看了看天上的太陽涩禀。三九已至料滥,卻和暖如春,著一層夾襖步出監(jiān)牢的瞬間艾船,已是汗流浹背葵腹。 一陣腳步聲響...
    開封第一講書人閱讀 33,054評論 1 270
  • 我被黑心中介騙來泰國打工, 沒想到剛下飛機就差點兒被人妖公主榨干…… 1. 我叫王不留屿岂,地道東北人践宴。 一個月前我還...
    沈念sama閱讀 48,224評論 3 371
  • 正文 我出身青樓,卻偏偏與公主長得像爷怀,于是被迫代替她去往敵國和親阻肩。 傳聞我的和親對象是個殘疾皇子,可洞房花燭夜當(dāng)晚...
    茶點故事閱讀 44,974評論 2 355

推薦閱讀更多精彩內(nèi)容