借鑒于http://scikit-learn.org/stable/modules/outlier_detection.html#novelty-and-outlier-detection
一、概況
兩大異常
novelty detection
這些訓練數據沒有被異常值所污染，我們有興趣在新的觀測中發現異常。
outlier detection
訓練數據中包含異常值，我們需要擬合訓練數據的中心模式，忽略偏離的觀測。
機(jī)器學(xué)習(xí)(無(wú)監(jiān)督學(xué)習(xí))
學(xué)習(xí):estimator.fit(X_train)
預(yù)測(cè):estimator.predict(X_test)媚创,異常值為-1
二、novelty detection
以下為建模代碼:
import numpy as np
from sklearn import svm

# Grid on which the decision function would be evaluated for plotting.
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))

# Generate train data: one Gaussian blob shifted to (+2, +2), one to (-2, -2).
X = 0.3 * np.random.randn(100, 2)
X_train = np.concatenate((X + 2, X - 2))

# Generate some regular novel observations from the same distribution.
X = 0.3 * np.random.randn(20, 2)
X_test = np.concatenate((X + 2, X - 2))

# Generate some abnormal novel observations (uniform noise over the plane).
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))

# Fit the one-class SVM (novelty detection) model on the clean training set.
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)

# predict() returns +1 for inliers and -1 for outliers.
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)

# Error counts: inlier sets wrongly labelled -1, outlier set wrongly labelled +1.
n_error_train = int((y_pred_train == -1).sum())
n_error_test = int((y_pred_test == -1).sum())
n_error_outliers = int((y_pred_outliers == 1).sum())
三、Outlier Detection
covariance.EmpiricalCovariance算法
在高斯分布數據上顯示具有馬氏距離的協方差估計的示例。
以下為建模代碼:
import numpy as np
from sklearn.covariance import EmpiricalCovariance, MinCovDet

n_samples = 125
n_outliers = 25
n_features = 2

# Generate Gaussian data whose first feature has twice the spread.
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)

# Contaminate the sample: replace the last n_outliers rows with points
# drawn from a much wider distribution along the second feature.
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)

# Robust fit: Minimum Covariance Determinant (MCD) estimator.
robust_cov = MinCovDet().fit(X)

# Non-robust fit on the full (contaminated) data set, for comparison
# against the true parameters.
emp_cov = EmpiricalCovariance().fit(X)

# Squared Mahalanobis distance of each observation under the empirical fit.
Y = emp_cov.mahalanobis(X)
ensemble.IsolationForest算法
在高維數(shù)據(jù)集中執(zhí)行異常值檢測(cè)的一種有效方法是使用隨機(jī)森林
neighbors.LocalOutlierFactor(LOF)算法
對(duì)中等高維數(shù)據(jù)集執(zhí)行異常值檢測(cè)的另一種有效方法是使用局部離群因子(LOF)算法粤剧。
結(jié)合以上四種異常檢測(cè)方法建模比較:
sklearn.svm(支持向量機(jī))
sklearn.covariance.EllipticEnvelope(高斯分布的協(xié)方差估計(jì))
sklearn.ensemble.IsolationForest(隨機(jī)森林)
sklearn.neighbors.LocalOutlierFactor(LOF)
import numpy as np
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor

# Reproducible random number generator for estimators that accept one.
rng = np.random.RandomState(42)

# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]

# Define the outlier detection tools to be compared.
# BUG FIX: LocalOutlierFactor is the class imported above; the original
# called LocalOutlierFactor.LocalOutlierFactor(...), which raises
# AttributeError at module load time.
classifiers = {
    "One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
                                     kernel="rbf", gamma=0.1),
    "Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
    "Isolation Forest": IsolationForest(max_samples=n_samples,
                                        contamination=outliers_fraction,
                                        random_state=rng),
    "Local Outlier Factor": LocalOutlierFactor(n_neighbors=35,
                                               contamination=outliers_fraction),
}

# Compare the given classifiers under the given settings.
# Grid on which each decision function is evaluated for plotting.
xx, yy = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
# Ground truth: +1 for inliers, -1 for the trailing outlier rows.
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1

# Fit the problem with varying cluster separation.
# NOTE: the original reused the loop variable `i` for both the outer and
# the inner loop, shadowing the outer index; renamed to avoid confusion.
for offset_idx, offset in enumerate(clusters_separation):
    np.random.seed(42)
    # Data generation: two Gaussian clusters at +/- offset.
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
    X = np.r_[X1, X2]
    # Add uniformly distributed outliers.
    X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]

    # Fit each model and tag outliers.
    for clf_idx, (clf_name, clf) in enumerate(classifiers.items()):
        if clf_name == "Local Outlier Factor":
            # LOF has no separate predict(); fit_predict labels the
            # training data directly.
            y_pred = clf.fit_predict(X)
            scores_pred = clf.negative_outlier_factor_
        else:
            clf.fit(X)
            scores_pred = clf.decision_function(X)
            y_pred = clf.predict(X)
        # Score threshold below which the outliers_fraction lowest
        # scores fall.
        threshold = stats.scoreatpercentile(scores_pred,
                                            100 * outliers_fraction)
        n_errors = (y_pred != ground_truth).sum()
        print(scores_pred)
        if clf_name == "Local Outlier Factor":
            # decision_function is private (underscore-prefixed) for LOF
            # in this scikit-learn version.
            Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)
        print(Z)