# Basic K-means algorithm
import numpy as np
import random
def calc_dist(vec1, vec2):
    """Return the Euclidean distance between two vectors as a scalar.

    Works for 1-D arrays and for np.mat row vectors (the form Kmeans
    passes in).
    """
    # np.sum reduces over every element; the builtin sum() used before
    # iterates an np.mat by rows and returns a 1xN matrix instead of a
    # scalar distance, which then breaks scalar comparisons in Kmeans.
    return np.sqrt(np.sum(np.power(vec1 - vec2, 2)))
def rand_cent(dataSet, k):
    """Create k random centroids.

    Each feature of every centroid is drawn uniformly between that
    feature's min and max over the data, so a centroid is not
    necessarily an actual data point.

    Returns a (k, n) np.mat of centroids.
    """
    n = np.shape(dataSet)[1]  # bare shape() was a NameError under `import numpy as np`
    centroids = np.mat(np.zeros((k, n)))
    for j in range(n):
        min_val = float(np.min(dataSet[:, j]))
        range_val = float(np.max(dataSet[:, j]) - min_val)
        # Fill COLUMN j for all k centroids. The original wrote row j
        # (wrong for k != n, IndexError for j >= k) and called
        # random.rand, which does not exist in the stdlib random module.
        centroids[:, j] = min_val + range_val * np.random.rand(k, 1)
    return centroids
def Kmeans(dataSet, k, dist_eval=calc_dist, create_cent=rand_cent):
    """Standard k-means clustering.

    Algorithm:
        Create k random starting centroids.
        While any sample's cluster assignment changes:
            Assign every sample to its nearest centroid.
            Recompute each centroid as the mean of its members.

    Args:
        dataSet: (m, n) np.mat of samples.
        k: number of clusters.
        dist_eval: distance function between two row vectors.
        create_cent: function producing the initial (k, n) centroids.

    Returns:
        (centroids, cluster_assign) where cluster_assign[i] stores
        [cluster index, distance to that centroid] for sample i.
    """
    m = np.shape(dataSet)[0]  # bare shape() was a NameError under `import numpy as np`
    cluster_assign = np.mat(np.zeros((m, 2)))  # per sample: [cluster id, distance]
    centroids = create_cent(dataSet, k)  # random initial centroids
    cluster_change = True  # loop-termination flag
    while cluster_change:
        cluster_change = False
        # Reassign every sample to its nearest centroid.
        for i in range(m):
            min_dist = np.inf
            min_index = -1
            for j in range(k):
                dist = dist_eval(dataSet[i, :], centroids[j, :])
                if dist < min_dist:
                    min_dist = dist
                    min_index = j
            # Compare against the stored cluster id (column 0) only; the
            # original compared the scalar min_index with the whole 1x2
            # row, which raises ValueError on truth-testing.
            if cluster_assign[i, 0] != min_index:
                cluster_change = True
            cluster_assign[i, :] = [min_index, min_dist]
        if not cluster_change:
            break
        # Recompute each centroid as the mean of its member samples.
        for cent in range(k):
            members = dataSet[np.nonzero(cluster_assign[:, 0].A == cent)[0]]
            # Skip empty clusters: np.mean of zero rows is NaN and would
            # poison the centroid for all later iterations.
            if members.shape[0] > 0:
                centroids[cent, :] = np.mean(members, axis=0)
    return centroids, cluster_assign
# Bisecting K-means algorithm
def bin_Kmeans(dataSet, k, dist_eval=calc_dist):
    """Bisecting k-means clustering.

    Plain k-means easily converges to a local minimum; bisecting
    k-means mitigates this:
        1. Start with all points in one cluster.
        2. While the number of clusters is below k:
            For each existing cluster:
                Run 2-means on that cluster alone.
                New total error = SSE of the split pair
                                  + SSE of all other clusters.
            Commit the split that minimizes the total error.

    Returns:
        (centroids, cluster_assign): a (k, n) np.mat of centroids, and
        an (m, 2) np.mat whose columns are [cluster id, squared distance].
    """
    m = np.shape(dataSet)[0]  # bare shape() was a NameError under `import numpy as np`
    cluster_assign = np.mat(np.zeros((m, 2)))  # col 0: cluster id, col 1: squared distance
    centroid0 = np.mean(dataSet, axis=0).tolist()[0]  # initial cluster = centroid of all data
    cent_list = [centroid0]
    for i in range(m):
        # Squared Euclidean distance: weights points far from the center more heavily.
        cluster_assign[i, 1] = dist_eval(np.mat(centroid0), dataSet[i, :]) ** 2
    while len(cent_list) < k:
        lowest_sse = np.inf  # best total sum of squared error seen so far
        best_new_cents = None
        best_clusters = None
        best_split_cent = -1
        # Try splitting every cluster; keep the split with the lowest total SSE.
        for i in range(len(cent_list)):
            cluster_samples = dataSet[np.nonzero(cluster_assign[:, 0].A == i)[0], :]
            centroids, clusters = Kmeans(cluster_samples, 2, dist_eval)
            sse_split = sum(clusters[:, 1])  # error of the two new sub-clusters
            sse_other = sum(cluster_assign[np.nonzero(cluster_assign[:, 0].A != i)[0], 1])
            if sse_split + sse_other < lowest_sse:
                lowest_sse = sse_split + sse_other
                best_new_cents = centroids
                best_clusters = clusters.copy()  # copy: relabeled below, keep Kmeans result intact
                best_split_cent = i
        # Relabel: sub-cluster 1 gets a brand-new id, sub-cluster 0 keeps the split id.
        best_clusters[np.nonzero(best_clusters[:, 0].A == 1)[0], 0] = len(cent_list)
        best_clusters[np.nonzero(best_clusters[:, 0].A == 0)[0], 0] = best_split_cent
        # Store centroids as plain lists so np.mat(cent_list) works at the end.
        # The original used [0:] / [1:], which slices whole matrices (and the
        # wrong rows) instead of extracting row 0 and row 1.
        cent_list[best_split_cent] = best_new_cents[0, :].tolist()[0]
        cent_list.append(best_new_cents[1, :].tolist()[0])
        # Write the relabeled assignments back into the global record.
        cluster_assign[np.nonzero(cluster_assign[:, 0].A == best_split_cent)[0], :] = best_clusters
    return np.mat(cent_list), cluster_assign