A plain-language example: how to understand the support vector machine (SVM)?
1. Explaining SVM

The script below generates two linearly separable Gaussian clusters, fits a linear SVC, and plots the separating line, the margin boundaries, and the support vectors.
# -*- coding: utf-8 -*-
"""
Purpose: demonstrate a linear SVM on a toy dataset
"""
import numpy as np
from sklearn import svm
import matplotlib.pyplot as plt

# Generate a random, linearly separable dataset: two Gaussian clusters
np.random.seed(2)
x = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
y = [-1] * 20 + [1] * 20
print(x)
print(y)

# Build and fit the model
model = svm.SVC(kernel='linear', C=1.0)
model.fit(x, y)
support_vectors = model.support_vectors_  # the support vectors themselves
index_ = model.support_                   # indices of the support vectors
num = model.n_support_                    # number of support vectors per class
print(support_vectors)
print(index_)
print(num)

# Predict on a few new points
x_ = np.array([[0, 1], [3, 4], [-1, -1]])
y_pred = model.predict(x_)
print(y_pred)

# Fitted hyperplane parameters: a = intercept, b = weight vector (coef_)
a = model.intercept_
b = model.coef_

# Plot the data, the separating hyperplane and the margin boundaries
plt.scatter(support_vectors[:, 0], support_vectors[:, 1], s=100, edgecolors='k')
plt.scatter(x[:, 0], x[:, 1], c=y, cmap=plt.cm.coolwarm, marker='o', s=50)
x1 = np.linspace(-5, 4, 100)
x2 = (-b[0][0] * x1 - a[0]) / b[0][1]                      # hyperplane w·x + b = 0
plt.plot(x1, x2, 'k')
plt.plot(x1, (-b[0][0] * x1 - a[0] - 1) / b[0][1], 'k--')  # margin boundary w·x + b = -1
plt.plot(x1, (-b[0][0] * x1 - a[0] + 1) / b[0][1], 'k--')  # margin boundary w·x + b = +1
plt.scatter(x_[:, 0], x_[:, 1], c=y_pred, marker='^', s=80)
plt.axis('tight')
plt.show()
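The dashed lines in the plot are the margin boundaries w·x + b = ±1 and the solid line is the separating hyperplane w·x + b = 0, both reconstructed from model.coef_ and model.intercept_. As a quick sanity check, the same decision values can be recomputed by hand; the following is a minimal sketch (run after the script above, reusing its model, np, and x_):

# Minimal sketch: recompute the linear decision function by hand.
# Assumes `model`, `np`, and `x_` from the script above are already in scope.
w = model.coef_[0]        # weight vector of the separating hyperplane
b0 = model.intercept_[0]  # bias term
manual = x_ @ w + b0      # w·x + b for each new point
print(manual)                        # signed values; |value| >= 1 means on or outside the margin band
print(np.sign(manual))               # signs should match the -1 / +1 labels from model.predict(x_)
print(model.decision_function(x_))   # should agree with `manual` up to floating-point error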
2. Iris classification with SVM

The script below loads the iris dataset, optionally visualizes the first two features, trains an RBF-kernel SVC on a 67/33 train/test split, and compares true and predicted labels on the test set.
# -*- coding: utf-8 -*-
"""
Purpose: iris classification with an SVM
"""
from sklearn import datasets, svm
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Load the data
iris = datasets.load_iris()
iris_feature = iris['data']
iris_target = iris['target']
iris_target_name = iris['target_names']
# print(iris)


def show():
    # Split the sample indices by class (0 = setosa, 1 = versicolor, 2 = virginica)
    t0 = [index for index in range(len(iris_target)) if iris_target[index] == 0]
    t1 = [index for index in range(len(iris_target)) if iris_target[index] == 1]
    t2 = [index for index in range(len(iris_target)) if iris_target[index] == 2]
    # Columns 0 and 1 are sepal length and sepal width
    plt.scatter(x=iris_feature[t0, 0], y=iris_feature[t0, 1], color='r', label='Iris-setosa')
    plt.scatter(x=iris_feature[t1, 0], y=iris_feature[t1, 1], color='g', label='Iris-versicolor')
    plt.scatter(x=iris_feature[t2, 0], y=iris_feature[t2, 1], color='b', label='Iris-virginica')
    plt.xlabel("Sepal length", fontsize=20)
    plt.ylabel("Sepal width", fontsize=20)
    plt.title("Iris dataset", fontsize=20)
    plt.legend(fontsize=20)
    plt.show()


if __name__ == '__main__':
    # show()
    feature_train, feature_test, target_train, target_test = train_test_split(
        iris_feature, iris_target, test_size=0.33, random_state=10)
    model = svm.SVC(C=1.0, kernel='rbf', decision_function_shape='ovr', gamma=0.01)
    model.fit(feature_train, target_train)
    print("Training set accuracy:", model.score(feature_train, target_train))
    print("Test set accuracy:", model.score(feature_test, target_test))
    target_test_predict = model.predict(feature_test)
    comp = zip(target_test, target_test_predict)
    print(list(comp))

    # Left: true labels; right: predicted labels
    plt.figure()
    plt.subplot(121)
    plt.scatter(feature_test[:, 0], feature_test[:, 1], c=target_test, edgecolors='k', s=50)
    plt.subplot(122)
    plt.scatter(feature_test[:, 0], feature_test[:, 1], c=target_test_predict, edgecolors='k', s=50)
    plt.show()
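The two overall accuracy scores above do not show which of the three classes the RBF model confuses. A minimal sketch of a per-class breakdown, assuming model, feature_test, target_test, and iris_target_name from the script above are still in scope:

# Minimal sketch: per-class evaluation of the iris SVM above.
from sklearn.metrics import classification_report, confusion_matrix

pred = model.predict(feature_test)
print(confusion_matrix(target_test, pred))   # rows = true class, columns = predicted class
print(classification_report(target_test, pred,
                            target_names=iris_target_name))  # precision / recall / F1 per class

If the test score is unsatisfactory, tuning C and gamma (for example with GridSearchCV from sklearn.model_selection) is the usual next step.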
I will flesh out the contents of the three files when I have time.