ML匯總

常用的scikit-learn分類器

# -*- coding: utf-8 -*-
"""
Created on Fri Jul 29 21:51:11 2016

The use of ten classic machine learning algorithm!

@author: ckawyh
"""


import sys
import time
from sklearn import metrics
import numpy as np
import cPickle as pickle  # Python 2 only; this module is just `pickle` in Python 3

# Python 2 hack: re-expose sys.setdefaultencoding (deleted by site.py) and
# force UTF-8 as the default str<->unicode codec. Unnecessary and
# unavailable in Python 3, where str is Unicode by default.
reload(sys)
sys.setdefaultencoding('utf8')

# Multinomial Naive Bayes Classifier
def naive_bayes_classifier(train_x, train_y):
    """Fit a multinomial naive Bayes model (Lidstone smoothing, alpha=0.01).

    Returns the fitted MultinomialNB estimator.
    """
    from sklearn.naive_bayes import MultinomialNB
    nb = MultinomialNB(alpha=0.01)
    # sklearn estimators return self from fit(), so this is the fitted model.
    return nb.fit(train_x, train_y)


# KNN Classifier
def knn_classifier(train_x, train_y):
    """Fit a k-nearest-neighbours classifier with sklearn's default settings.

    Returns the fitted KNeighborsClassifier estimator.
    """
    from sklearn.neighbors import KNeighborsClassifier
    knn = KNeighborsClassifier()
    return knn.fit(train_x, train_y)


# Logistic Regression Classifier
def logistic_regression_classifier(train_x, train_y):
    """Fit an L2-regularised logistic regression classifier.

    Returns the fitted LogisticRegression estimator.
    """
    from sklearn.linear_model import LogisticRegression
    lr = LogisticRegression(penalty='l2')
    return lr.fit(train_x, train_y)


# Random Forest Classifier
def random_forest_classifier(train_x, train_y, n_estimators=8):
    """Fit a random forest classifier.

    Parameters
    ----------
    train_x : array-like, shape (n_samples, n_features)
        Training feature matrix.
    train_y : array-like, shape (n_samples,)
        Training labels.
    n_estimators : int, optional
        Number of trees in the forest. Defaults to 8, preserving the
        value that was previously hard-coded.

    Returns
    -------
    The fitted RandomForestClassifier estimator.
    """
    from sklearn.ensemble import RandomForestClassifier
    model = RandomForestClassifier(n_estimators=n_estimators)
    model.fit(train_x, train_y)
    return model


# Decision Tree Classifier
def decision_tree_classifier(train_x, train_y):
    """Fit a decision tree classifier with sklearn's default settings.

    Returns the fitted DecisionTreeClassifier estimator.
    """
    from sklearn.tree import DecisionTreeClassifier
    dt = DecisionTreeClassifier()
    return dt.fit(train_x, train_y)


# GBDT(Gradient Boosting Decision Tree) Classifier
def gradient_boosting_classifier(train_x, train_y):
    """Fit a gradient-boosted decision tree ensemble (200 boosting stages).

    Returns the fitted GradientBoostingClassifier estimator.
    """
    from sklearn.ensemble import GradientBoostingClassifier
    gbdt = GradientBoostingClassifier(n_estimators=200)
    return gbdt.fit(train_x, train_y)


# SVM Classifier
def svm_classifier(train_x, train_y):
    """Fit an RBF-kernel support vector classifier.

    probability=True enables predict_proba on the result at the cost of
    extra (cross-validated) work during fitting.

    Returns the fitted SVC estimator.
    """
    from sklearn.svm import SVC
    svm = SVC(kernel='rbf', probability=True)
    return svm.fit(train_x, train_y)

# SVM Classifier using cross validation
def svm_cross_validation(train_x, train_y):
   """Grid-search C and gamma for an RBF SVM, print the best parameters,
   then refit a fresh SVC with those parameters on the full training set.

   NOTE(review): sklearn.grid_search was removed in scikit-learn 0.20;
   modern code should import GridSearchCV from sklearn.model_selection.
   """
   from sklearn.grid_search import GridSearchCV
   from sklearn.svm import SVC
   # Base estimator; probability=True enables predict_proba (slower fit).
   model = SVC(kernel='rbf', probability=True)
   # 7 C values x 2 gamma values = 14 candidates, searched with the
   # library-default cross-validation (3-fold in this sklearn version,
   # per the "Fitting 3 folds" output below).
   param_grid = {'C': [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000], 'gamma': [0.001, 0.0001]}
   grid_search = GridSearchCV(model, param_grid, n_jobs = 1, verbose=1)
   grid_search.fit(train_x, train_y)
   # Dump every parameter of the winning estimator (not just C/gamma).
   best_parameters = grid_search.best_estimator_.get_params()
   for para, val in best_parameters.items():
       print para, val
   # Refit from scratch on all of train_x with the best C/gamma found.
   model = SVC(kernel='rbf', C=best_parameters['C'], gamma=best_parameters['gamma'], probability=True)
   model.fit(train_x, train_y)
   return model

    
if __name__ == '__main__':
   # Demo driver: train each classifier on the Iris data set and print
   # its training time, accuracy, and per-class precision/recall report.
   # NOTE(review): sklearn.cross_validation and DataFrame.as_matrix() are
   # removed APIs (use sklearn.model_selection and DataFrame.to_numpy()
   # in current library versions).
   from sklearn.datasets import  load_iris
   from sklearn import cross_validation
   from pandas import DataFrame
   data_dict = load_iris()
   data = data_dict.data
   label = data_dict.target
   # Stack features and label into one array so a single train_test_split
   # call keeps rows and labels aligned; the label becomes column 4.
   df = DataFrame(data)
   df[4] = label
   data_array = df.as_matrix()
   # 70/30 train/test split; random_state=0 makes the split reproducible.
   split_train, split_cv = cross_validation.train_test_split(data_array,test_size=0.3,random_state=0)
   
   train_x = split_train[:,0:4]
   train_y = split_train[:,4]
   test_x = split_cv[:,0:4]
   test_y = split_cv[:,4]    
   
   # Set to a filename to pickle all fitted models at the end; None disables.
   model_save_file = None
   model_save = {}
   # Keys into `classifiers`; order controls the order models are evaluated.
   test_classifiers = ['NB', 'KNN', 'LR', 'RF', 'DT', 'SVM', 'SVMCV', 'GBDT']
   classifiers = {'NB':naive_bayes_classifier,
                 'KNN':knn_classifier,
                 'LR':logistic_regression_classifier,
                 'RF':random_forest_classifier,
                 'DT':decision_tree_classifier,
                 'SVM':svm_classifier,
                 'SVMCV':svm_cross_validation,
                 'GBDT':gradient_boosting_classifier
   }
   num_train, num_feat = train_x.shape
   num_test, num_feat = test_x.shape
   # Computed but never used below; Iris has 3 classes so this is False here.
   is_binary_class = (len(np.unique(train_y)) == 2)
   print '******************** Data Info *********************'
   print '#training data: %d, #testing_data: %d, dimension: %d' % (num_train, num_test, num_feat)
   
   for classifier in test_classifiers:
       print '******************* %s ********************' % classifier
       start_time = time.time()
       # Each dict entry is a train function returning a fitted model.
       model = classifiers[classifier](train_x, train_y)
       print 'training took %fs!' % (time.time() - start_time)
       predict = model.predict(test_x)
       if model_save_file != None:
           model_save[classifier] = model
       accuracy = metrics.accuracy_score(test_y, predict)
       report = metrics.classification_report(test_y, predict)
       print 'accuracy: %.2f%%' % (100 * accuracy)
       print report
   
   if model_save_file != None:
       pickle.dump(model_save, open(model_save_file, 'wb'))
運行結果:
******************** Data Info *********************
#training data: 105, #testing_data: 45, dimension: 4
******************* NB ********************
training took 0.001000s!
accuracy: 60.00%
             precision    recall  f1-score   support

        0.0       1.00      1.00      1.00        16
        1.0       0.00      0.00      0.00        18
        2.0       0.38      1.00      0.55        11

avg / total       0.45      0.60      0.49        45

******************* KNN ********************
training took 0.000000s!
accuracy: 97.78%
             precision    recall  f1-score   support

        0.0       1.00      1.00      1.00        16
        1.0       1.00      0.94      0.97        18
        2.0       0.92      1.00      0.96        11

avg / total       0.98      0.98      0.98        45

******************* LR ********************
training took 0.001000s!
accuracy: 88.89%
             precision    recall  f1-score   support

        0.0       1.00      1.00      1.00        16
        1.0       1.00      0.72      0.84        18
        2.0       0.69      1.00      0.81        11

avg / total       0.92      0.89      0.89        45

******************* RF ********************
training took 0.019000s!
accuracy: 97.78%
             precision    recall  f1-score   support

        0.0       1.00      1.00      1.00        16
        1.0       1.00      0.94      0.97        18
        2.0       0.92      1.00      0.96        11

avg / total       0.98      0.98      0.98        45

******************* DT ********************
training took 0.000000s!
accuracy: 97.78%
             precision    recall  f1-score   support

        0.0       1.00      1.00      1.00        16
        1.0       1.00      0.94      0.97        18
        2.0       0.92      1.00      0.96        11

avg / total       0.98      0.98      0.98        45

******************* SVM ********************
training took 0.001000s!
accuracy: 97.78%
             precision    recall  f1-score   support

        0.0       1.00      1.00      1.00        16
        1.0       1.00      0.94      0.97        18
        2.0       0.92      1.00      0.96        11

avg / total       0.98      0.98      0.98        45

******************* SVMCV ********************
Fitting 3 folds for each of 14 candidates, totalling 42 fits
kernel rbf
C 1000
verbose False
probability True
degree 3
shrinking True
max_iter -1
decision_function_shape None
random_state None
tol 0.001
cache_size 200
coef0 0.0
gamma 0.001
class_weight None
training took 0.143000s!
accuracy: 97.78%
             precision    recall  f1-score   support

        0.0       1.00      1.00      1.00        16
        1.0       1.00      0.94      0.97        18
        2.0       0.92      1.00      0.96        11

avg / total       0.98      0.98      0.98        45

******************* GBDT ********************
[Parallel(n_jobs=1)]: Done  42 out of  42 | elapsed:    0.0s finished
training took 0.176000s!
accuracy: 97.78%
             precision    recall  f1-score   support

        0.0       1.00      1.00      1.00        16
        1.0       1.00      0.94      0.97        18
        2.0       0.92      1.00      0.96        11

avg / total       0.98      0.98      0.98        45
最后編輯于
著作權歸作者所有，轉載或內容合作請聯繫作者
  • 序言:七十年代末儡炼,一起剝皮案震驚了整個濱河市腮介,隨后出現(xiàn)的幾起案子,更是在濱河造成了極大的恐慌,老刑警劉巖砚嘴,帶你破解...
    沈念sama閱讀 212,599評論 6 492
  • 序言:濱河連續(xù)發(fā)生了三起死亡事件,死亡現(xiàn)場離奇詭異蜒茄,居然都是意外死亡梅垄,警方通過查閱死者的電腦和手機厂捞,發(fā)現(xiàn)死者居然都...
    沈念sama閱讀 90,629評論 3 385
  • 文/潘曉璐 我一進店門,熙熙樓的掌柜王于貴愁眉苦臉地迎上來,“玉大人靡馁,你說我怎么就攤上這事欲鹏。” “怎么了臭墨?”我有些...
    開封第一講書人閱讀 158,084評論 0 348
  • 文/不壞的土叔 我叫張陵赔嚎,是天一觀的道長。 經(jīng)常有香客問我胧弛,道長尤误,這世上最難降的妖魔是什么? 我笑而不...
    開封第一講書人閱讀 56,708評論 1 284
  • 正文 為了忘掉前任结缚,我火速辦了婚禮损晤,結果婚禮上,老公的妹妹穿的比我還像新娘红竭。我一直安慰自己尤勋,他們只是感情好,可當我...
    茶點故事閱讀 65,813評論 6 386
  • 文/花漫 我一把揭開白布德崭。 她就那樣靜靜地躺著斥黑,像睡著了一般揖盘。 火紅的嫁衣襯著肌膚如雪眉厨。 梳的紋絲不亂的頭發(fā)上,一...
    開封第一講書人閱讀 50,021評論 1 291
  • 那天兽狭,我揣著相機與錄音憾股,去河邊找鬼。 笑死箕慧,一個胖子當著我的面吹牛服球,可吹牛的內(nèi)容都是我干的。 我是一名探鬼主播颠焦,決...
    沈念sama閱讀 39,120評論 3 410
  • 文/蒼蘭香墨 我猛地睜開眼斩熊,長吁一口氣:“原來是場噩夢啊……” “哼!你這毒婦竟也來了伐庭?” 一聲冷哼從身側響起粉渠,我...
    開封第一講書人閱讀 37,866評論 0 268
  • 序言:老撾萬榮一對情侶失蹤,失蹤者是張志新(化名)和其女友劉穎圾另,沒想到半個月后霸株,有當?shù)厝嗽跇淞掷锇l(fā)現(xiàn)了一具尸體,經(jīng)...
    沈念sama閱讀 44,308評論 1 303
  • 正文 獨居荒郊野嶺守林人離奇死亡集乔,尸身上長有42處帶血的膿包…… 初始之章·張勛 以下內(nèi)容為張勛視角 年9月15日...
    茶點故事閱讀 36,633評論 2 327
  • 正文 我和宋清朗相戀三年去件,在試婚紗的時候發(fā)現(xiàn)自己被綠了。 大學時的朋友給我發(fā)了我未婚夫和他白月光在一起吃飯的照片。...
    茶點故事閱讀 38,768評論 1 341
  • 序言:一個原本活蹦亂跳的男人離奇死亡尤溜,死狀恐怖倔叼,靈堂內(nèi)的尸體忽然破棺而出,到底是詐尸還是另有隱情靴跛,我是刑警寧澤缀雳,帶...
    沈念sama閱讀 34,461評論 4 333
  • 正文 年R本政府宣布,位于F島的核電站梢睛,受9級特大地震影響肥印,放射性物質(zhì)發(fā)生泄漏。R本人自食惡果不足惜绝葡,卻給世界環(huán)境...
    茶點故事閱讀 40,094評論 3 317
  • 文/蒙蒙 一深碱、第九天 我趴在偏房一處隱蔽的房頂上張望。 院中可真熱鬧藏畅,春花似錦敷硅、人聲如沸。這莊子的主人今日做“春日...
    開封第一講書人閱讀 30,850評論 0 21
  • 文/蒼蘭香墨 我抬頭看了看天上的太陽。三九已至榜旦,卻和暖如春幽七,著一層夾襖步出監(jiān)牢的瞬間,已是汗流浹背溅呢。 一陣腳步聲響...
    開封第一講書人閱讀 32,082評論 1 267
  • 我被黑心中介騙來泰國打工澡屡, 沒想到剛下飛機就差點兒被人妖公主榨干…… 1. 我叫王不留,地道東北人咐旧。 一個月前我還...
    沈念sama閱讀 46,571評論 2 362
  • 正文 我出身青樓驶鹉,卻偏偏與公主長得像,于是被迫代替她去往敵國和親铣墨。 傳聞我的和親對象是個殘疾皇子室埋,可洞房花燭夜當晚...
    茶點故事閱讀 43,666評論 2 350

推薦閱讀更多精彩內(nèi)容