Kmeans是一种简单易用的聚类算法，是少有的会出现在深度学习项目中的传统算法，比如人脸搜索项目、物体检测项目（yolov3中用到了Kmeans进行anchors聚类）等。
一般使用Kmeans会直接调sklearn，如果任务比较复杂，可以通过numpy进行自定义，这里介绍使用Pytorch实现的方式，经测试，通过Pytorch调用GPU之后，能够提高多特征聚类的速度。
import torch
import time
from tqdm import tqdm
class KMEANS:
    """Simple K-Means clustering implemented with PyTorch tensors.

    All tensors live on ``device``, so passing a CUDA device moves the whole
    computation to the GPU, which helps when the feature dimension is large.
    """

    def __init__(self, n_clusters=20, max_iter=None, verbose=True, device=torch.device("cpu")):
        """
        Args:
            n_clusters: number of cluster centers.
            max_iter: if given, run exactly this many iterations; if ``None``,
                iterate until the total distance variation drops below 1e-3.
            verbose: print the variation and representative samples each step.
            device: torch device on which every tensor is allocated.
        """
        # ``n_cluster`` duplicates ``n_clusters``; kept so existing readers of
        # either attribute keep working.
        self.n_cluster = n_clusters
        self.n_clusters = n_clusters
        self.labels = None   # cluster index per sample, shape [x.shape[0]]
        self.dists = None    # squared distances, shape [x.shape[0], n_clusters]
        self.centers = None
        self.variation = torch.Tensor([float("Inf")]).to(device)
        self.verbose = verbose
        self.started = False  # True once self.dists has been computed at least once
        self.representative_samples = None
        self.max_iter = max_iter
        self.count = 0        # number of completed fit iterations
        self.device = device

    def fit(self, x):
        """Cluster the rows of ``x`` (shape [n_samples, n_features])."""
        # Reset the iteration state so fit() can be called more than once on
        # the same instance (previously the stale count/variation leaked in).
        self.count = 0
        self.started = False
        self.variation = torch.Tensor([float("Inf")]).to(self.device)
        # Random initial centers; a kmeans++-style seeding (as in sklearn)
        # would converge faster.
        init_row = torch.randint(0, x.shape[0], (self.n_clusters,)).to(self.device)
        self.centers = x[init_row]
        while True:
            # Assign every sample to its nearest center.
            self.nearest_center(x)
            # Recompute each center from its assigned samples.
            self.update_center(x)
            if self.verbose:
                print(self.variation, torch.argmin(self.dists, (0)))
            # With max_iter unset we stop on convergence; with max_iter set we
            # always run exactly max_iter iterations (useful for benchmarking).
            if torch.abs(self.variation) < 1e-3 and self.max_iter is None:
                break
            elif self.max_iter is not None and self.count == self.max_iter:
                break
            self.count += 1
        self.representative_sample()

    def nearest_center(self, x):
        """Compute per-sample labels and squared distances to every center."""
        n_samples = x.shape[0]
        labels = torch.empty((n_samples,)).long().to(self.device)
        # Preallocate the distance matrix; the original torch.cat inside the
        # loop made this step accidentally O(n^2).
        dists = torch.empty((n_samples, self.n_clusters)).to(self.device)
        for i, sample in enumerate(x):
            diff = sample - self.centers
            dist = torch.sum(diff * diff, (1))
            labels[i] = torch.argmin(dist)
            dists[i] = dist
        self.labels = labels
        if self.started:
            # Total change in distances versus the previous iteration.
            self.variation = torch.sum(self.dists - dists)
        self.dists = dists
        self.started = True

    def update_center(self, x):
        """Move every center to the mean of the samples assigned to it."""
        centers = torch.empty((0, x.shape[1])).to(self.device)
        for i in range(self.n_clusters):
            mask = self.labels == i
            cluster_samples = x[mask]
            if cluster_samples.shape[0] == 0:
                # Empty cluster: keep the previous center instead of letting
                # torch.mean over zero rows poison the centers with NaN.
                new_center = self.centers[i]
            else:
                new_center = torch.mean(cluster_samples, (0))
            centers = torch.cat([centers, new_center.unsqueeze(0)], (0))
        self.centers = centers

    def representative_sample(self):
        # The sample closest to each center is that cluster's representative,
        # which is easier to inspect than the center coordinates themselves.
        self.representative_samples = torch.argmin(self.dists, (0))
def time_clock(matrix, device):
    """Fit a fixed 10-iteration KMEANS on ``matrix`` and return the average
    wall-clock seconds spent per iteration."""
    start = time.time()
    model = KMEANS(max_iter=10, verbose=False, device=device)
    model.fit(matrix)
    elapsed = time.time() - start
    return elapsed / model.count
def choose_device(cuda=False):
    """Return the first CUDA device when ``cuda`` is truthy, else the CPU device."""
    return torch.device("cuda:0") if cuda else torch.device("cpu")
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    # Benchmark seconds-per-iteration on CPU and GPU across feature widths.
    feature_counts = [20, 100, 500, 2000, 8000, 20000]
    plt.figure()

    # CPU pass.
    device = choose_device(False)
    cpu_speeds = []
    for num_features in tqdm(feature_counts):
        matrix = torch.rand((10000, num_features)).to(device)
        cpu_speeds.append(time_clock(matrix, device))
    l1, = plt.plot(feature_counts, cpu_speeds, color='r', label='CPU')

    # GPU pass.
    device = choose_device(True)
    gpu_speeds = []
    for num_features in tqdm(feature_counts):
        matrix = torch.rand((10000, num_features)).to(device)
        gpu_speeds.append(time_clock(matrix, device))
    l2, = plt.plot(feature_counts, gpu_speeds, color='g', label="GPU")

    plt.xlabel("num_features")
    plt.ylabel("speed(s/iter)")
    plt.title("Speed with cuda")
    plt.legend(handles=[l1, l2], labels=['CPU', 'GPU'], loc='best')
    plt.savefig("../result/speed.jpg")
cpu和gpu运行的结果对比如下:
speed.jpg
可以看到，在特征数<3000的情况下，cpu运行速度更快，但是特征数量超过3000之后，gpu的优势越来越明显。
因为pytorch的矩阵运算接口基本是照着numpy写的，所以numpy的实现方式大概只需要将代码中的torch替换成numpy就可以了。