# Logistic regression worked example (邏輯回歸實(shí)例操作)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets

# Load the iris dataset; keep only sepal length (column 0) and petal
# length (column 2) so the feature space is 2-D and can be plotted.
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target

# Scatter plot of the three classes in the chosen 2-D feature space.
plt.scatter(X[y == 0, 0], X[y == 0, 1])
plt.scatter(X[y == 1, 0], X[y == 1, 1])
plt.scatter(X[y == 2, 0], X[y == 2, 1])
plt.show()
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

# Hold out a test set; fixed random_state makes the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Multinomial logistic regression. The deprecated multi_class='multinomial'
# argument is omitted: with solver='newton-cg' and 3 classes, sklearn's
# default already fits the multinomial loss, so behavior is unchanged.
lr = LogisticRegression(solver='newton-cg')
lr.fit(X_train, y_train)

# Mean accuracy on the train and test sets.
print('訓(xùn)練得分:', lr.score(X_train, y_train))
print('測(cè)試得分:', lr.score(X_test, y_test))
from matplotlib.colors import ListedColormap


def plot_decision_boundary(model, axis):
    """Shade the model's predicted class regions over a rectangle.

    Parameters
    ----------
    model : fitted classifier with a ``predict`` method taking 2 features.
    axis : sequence [x_min, x_max, y_min, y_max] bounding the region.
    """
    # Dense evaluation grid: roughly 100 points per unit along each axis.
    x0, x1 = np.meshgrid(
        np.linspace(axis[0], axis[1], int((axis[1] - axis[0]) * 100)).reshape(-1, 1),
        np.linspace(axis[2], axis[3], int((axis[3] - axis[2]) * 100)).reshape(-1, 1),
    )
    # One (x, y) row per grid point, the shape model.predict expects.
    X_new = np.c_[x0.ravel(), x1.ravel()]
    y_predict = model.predict(X_new)
    zz = y_predict.reshape(x0.shape)
    # One pastel colour per class region (3 classes in this script).
    custom_cmap = ListedColormap(['#EF9A9A', '#FFF59D', '#90CAF9'])
    plt.contourf(x0, x1, zz, cmap=custom_cmap)
# Shade the predicted class regions, then overlay the raw samples on top.
plot_decision_boundary(lr, axis=[4, 8, 0, 8])
for label in (0, 1, 2):
    plt.scatter(X[y == label, 0], X[y == label, 1])
plt.show()
from sklearn.metrics import precision_score, recall_score, f1_score

# Predictions on the held-out test set.
y_pred = lr.predict(X_test)

# Micro-averaged metrics aggregate over all three classes; note that with
# micro averaging, precision == recall == F1 == overall accuracy.
print('精準(zhǔn)率:', precision_score(y_test, y_pred, average="micro"))
print('召回率:', recall_score(y_test, y_pred, average="micro"))
print('F1分?jǐn)?shù):', f1_score(y_test, y_pred, average="micro"))