kNN近鄰算法
算法原理
樣本點(diǎn)的特性與該鄰居點(diǎn)的特性類似,可以簡單理解為“物以類聚”定铜。因此可以使用目標(biāo)點(diǎn)的多個(gè)鄰近點(diǎn)的特性表示當(dāng)前點(diǎn)的特性阳液。
k近鄰算法是非常特殊的,可以被認(rèn)為是沒有模型的算法揣炕,為了和其他算法統(tǒng)一帘皿,可以認(rèn)為訓(xùn)練數(shù)據(jù)集就是模型本身。
KNN分類算法:“投票法”畸陡,選擇這k 個(gè)樣本中出現(xiàn)最多的類別標(biāo)記作為預(yù)測結(jié)果鹰溜。
KNN回歸算法:“平均法”,將這k 個(gè)樣本的實(shí)值輸出標(biāo)記的平均值作為預(yù)測結(jié)果丁恭。
歐拉距離(歐氏距離)公式:$d(a,b)=\sqrt{(a_1-b_1)^2+(a_2-b_2)^2+\cdots+(a_n-b_n)^2}$
化簡公式:$d(a,b)=\sqrt{\sum_{i=1}^{n}(a_i-b_i)^2}$
KNN算法的核心要素
1.K值的選擇:K是超參(需要給定)曹动,K值過小容易導(dǎo)致過擬合(比如噪音點(diǎn)的數(shù)據(jù)會(huì)對結(jié)果造成影響),K值過大訓(xùn)練誤差會(huì)增大牲览,同時(shí)會(huì)使模型變得簡單墓陈,容易導(dǎo)致欠擬合。
2.距離的度量:采用歐式距離第献。
3.決策規(guī)則:在分類模型中,主要使用多數(shù)表決法或者加權(quán)多數(shù)表決法;在回歸模型中,主要使用平均值法或者加權(quán)平均值法。(基于距離遠(yuǎn)近進(jìn)行加權(quán),距離越近的樣本權(quán)重越大。)
kNN算法源碼
import numpy as np
from math import sqrt
from collections import Counter
from sklearn.metrics import accuracy_score
class KNNClassifier:
    """k-nearest-neighbours classifier.

    kNN is a lazy learner: ``fit`` only stores the training set, and each
    prediction is a majority vote among the ``k`` training samples closest
    (in Euclidean distance) to the query point.
    """

    def __init__(self, k):
        """Initialize the classifier.

        :param k: number of neighbours to consult; must be >= 1.
        """
        assert k >= 1, "k must be valid"
        self.k = k
        self._X_train = None  # training feature matrix, set by fit()
        self._y_train = None  # training label vector, set by fit()

    def fit(self, X_train, y_train):
        """Store the training data (the "model" of kNN) and return self.

        :param X_train: 2-D array of shape (n_samples, n_features).
        :param y_train: 1-D array of n_samples labels.
        """
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"
        assert self.k <= X_train.shape[0], \
            "the size of X_train must be at least k."
        self._X_train = X_train
        self._y_train = y_train
        return self

    def predict(self, X_predict):
        """Predict a label for every row of X_predict.

        :param X_predict: 2-D array with the same feature count as X_train.
        :return: np.ndarray of predicted labels, one per row.
        """
        assert self._X_train is not None and self._y_train is not None, \
            "must fit before predict!"
        assert X_predict.shape[1] == self._X_train.shape[1], \
            "the feature number of X_predict must be equal to X_train"
        y_predict = [self._predict(x) for x in X_predict]
        return np.array(y_predict)

    def _predict(self, x):
        """Predict the label of a single sample x by majority vote."""
        assert x.shape[0] == self._X_train.shape[1], \
            "the feature number of x must be equal to X_train"
        # Vectorized Euclidean distance to every training row in one NumPy
        # expression (replaces a per-row Python loop over math.sqrt).
        distances = np.sqrt(np.sum((self._X_train - x) ** 2, axis=1))
        nearest = np.argsort(distances)  # indices sorted by ascending distance
        topK_y = [self._y_train[i] for i in nearest[:self.k]]
        votes = Counter(topK_y)
        # most_common(1) -> [(label, count)]; take the winning label.
        return votes.most_common(1)[0][0]

    def score(self, X_test, y_test):
        """Return classification accuracy on a labelled test set.

        Mean of the boolean match mask equals sklearn's accuracy_score,
        without the third-party dependency.
        """
        y_predict = self.predict(X_test)
        return np.mean(y_predict == y_test)

    def __repr__(self):
        return "KNN(k=%d)" % self.k
kNN原生代碼
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
# --- Training data ------------------------------------------------------
# Ten 2-D points: the first five belong to class 0, the last five to class 1.
raw_data_X = [[3.393533211, 2.331273381],
              [3.110073483, 1.781539638],
              [1.343808831, 3.368360954],
              [3.582294042, 4.679179110],
              [2.280362439, 2.866990263],
              [7.423436942, 4.696522875],
              [5.745051997, 3.533989803],
              [9.172168622, 2.511101045],
              [7.792783481, 3.424088941],
              [7.939820817, 0.791637231]
              ]
raw_data_y = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
x = np.array([8.093607318, 3.365731514])  # new point: does it belong to class 0 or 1?
X_train = np.array(raw_data_X)
y_train = np.array(raw_data_y)
# --- kNN core: Euclidean distance from x to every training sample -------
distances = [sqrt(np.sum((sample - x) ** 2)) for sample in X_train]
print(distances)
nearest = np.argsort(distances)  # indices sorted by ascending distance
print(nearest)
k = 6  # number of neighbours consulted by the vote
num = [y_train[idx] for idx in nearest[:k]]
print(num)
from collections import Counter
votes = Counter(num)
print(votes.most_common(1)[0][0])
# Visualize: class-0 points in green, class-1 points in red, query point in blue.
mask0 = y_train == 0
mask1 = y_train == 1
plt.scatter(X_train[mask0, 0], X_train[mask0, 1], color='g')
plt.scatter(X_train[mask1, 0], X_train[mask1, 1], color='r')
plt.scatter(x[0], x[1], color='b')
plt.show()