Pros: works even with relatively little training data; handles multi-class problems.
Cons: sensitive to how the input data is prepared.
Suitable data types: nominal (categorical) data.
The training set $\text{TrainingSet} = \{(x_1, y_1), (x_2, y_2), \dots, (x_N, y_N)\}$ contains $N$ training samples, where each $x_i = (x_i^{(1)}, x_i^{(2)}, \dots, x_i^{(M)})^T$ is an $M$-dimensional vector and $y_i \in \{c_1, c_2, \dots, c_K\}$ belongs to one of the $K$ classes.
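Naive Bayes picks the class with the largest posterior probability and assumes the features are conditionally independent given the class. In the notation above, this is the standard rule that the code below implements:

$$
P(c_k \mid x) = \frac{P(x \mid c_k)\,P(c_k)}{P(x)},
\qquad
P(x \mid c_k) = \prod_{j=1}^{M} P\bigl(x^{(j)} \mid c_k\bigr) \ \text{(the "naive" independence assumption)},
$$

$$
\hat{y} = \arg\max_{c_k}\; P(c_k)\prod_{j=1}^{M} P\bigl(x^{(j)} \mid c_k\bigr),
$$

where $P(x)$ is the same for every class and can therefore be dropped from the comparison.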
# -*- coding: utf-8 -*-
from numpy import *

# 1. Prepare data: a toy corpus of tokenized posts and their labels
#    (1 = abusive/insulting wording, 0 = normal)
def load_data():
    documents = [
        ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
        ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
        ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
        ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']
    ]
    class_label = [0, 1, 0, 1, 0, 1]
    return documents, class_label
# 2. Build the vocabulary: the list of unique words across all documents
def create_vocab_list(documents):
    word_set = set([])
    for doc in documents:
        word_set = word_set | set(doc)  # set union keeps each word once
    return list(word_set)
# 3. Convert a document into a set-of-words vector: 1 if the vocabulary word
#    occurs in the document, 0 otherwise
def doc_to_vector(word_list, document):
    return_vector = [0] * len(word_list)
    for word in document:
        if word in word_list:
            return_vector[word_list.index(word)] = 1
    return return_vector
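# A bag-of-words variant (my addition, not part of the original code): it counts how
# many times each word occurs instead of only recording presence/absence, which can
# matter when repeated words carry information.
def doc_to_bag_vector(word_list, document):
    return_vector = [0] * len(word_list)
    for word in document:
        if word in word_list:
            return_vector[word_list.index(word)] += 1
    return return_vector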
# 4. Train: estimate P(Ci) and P(Wi|Ci) from the training matrix
def train_bayes(train_matrix, train_category):
    num_docs = len(train_matrix)
    num_words = len(train_matrix[0])
    # P(c1); with only two classes, P(c0) = 1 - P(c1)
    p1_prob = sum(train_category) / float(num_docs)
    # per-word counts for each category, initialized to 1 (Laplace-style smoothing)
    p0_num = ones(num_words)
    p1_num = ones(num_words)
    # total word count for each category, initialized to 2 (Laplace-style smoothing)
    p1_count = 2.0
    p0_count = 2.0
    for i in range(num_docs):
        if train_category[i] == 1:
            p1_num += train_matrix[i]
            p1_count += sum(train_matrix[i])
        else:
            p0_num += train_matrix[i]
            p0_count += sum(train_matrix[i])
    # log P(Wi|Ci): logs are used to avoid floating-point underflow later
    p1_vec = log(p1_num / p1_count)
    p0_vec = log(p0_num / p0_count)
    return p0_vec, p1_vec, p1_prob
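# With the smoothed counts above, the estimate this code computes is
#     P(w_j | c_i) = (times w_j appears in class-i documents + 1)
#                    / (total words in class-i documents + 2),
# so a word never seen in a class still gets a small nonzero probability instead of
# zeroing out the whole product in the classifier.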
# 5. Classify
def classify(vec_classify, p0_vec, p1_vec, p1_prob):
    """Compare P(Ci|W) = P(W|Ci)P(Ci)/P(W) for the two classes in log space;
    P(W) is the same for both classes, so it never needs to be computed.
    :param vec_classify: word vector of the document to classify
    :param p0_vec: log P(Wi|C0)
    :param p1_vec: log P(Wi|C1)
    :param p1_prob: P(C1)
    :return: 0 or 1
    """
    p1 = sum(vec_classify * p1_vec) + log(p1_prob)
    p0 = sum(vec_classify * p0_vec) + log(1 - p1_prob)
    if p1 > p0:
        return 1
    return 0
documents, class_label = load_data()
# 1. Build the vocabulary
voc_list = create_vocab_list(documents)
print(voc_list)
# 2. Convert each document into a word vector
train_mat = []
for doc in documents:
    train_mat.append(doc_to_vector(voc_list, doc))
print(train_mat)
# 3. Estimate P(Wi|Ci) and P(Ci)
p0_vec, p1_vec, p1_prob = train_bayes(train_mat, class_label)
print(p0_vec)
print(p1_prob)
# 4. Classify a new document
my_doc = ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid']
my_vec = doc_to_vector(voc_list, my_doc)
class_l = classify(my_vec, p0_vec, p1_vec, p1_prob)
print(class_l)
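# Quick sanity check (my addition, not in the original post): 'love', 'my' and
# 'dalmation' appear only in the class-0 training documents, so this should print 0.
my_doc2 = ['love', 'my', 'dalmation']
my_vec2 = doc_to_vector(voc_list, my_doc2)
print(classify(my_vec2, p0_vec, p1_vec, p1_prob))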