PCA example: reducing 2-D data to 1 dimension

The script below implements PCA by hand in four steps (center the samples, compute the covariance matrix, eigendecompose it, project onto the top eigenvector), then repeats the reduction with scikit-learn's PCA for comparison.

import numpy as np

# Reduce the 2-D samples to 1 dimension
num = [(2.5, 2.4), (0.5, 0.7), (2.2, 2.9), (1.9, 2.2), (3.1, 3.0),
       (2.3, 2.7), (2, 1.6), (1, 1.1), (1.5, 1.6), (1.1, 0.9)]
num_array = np.array(num)
n1_avg, n2_avg = np.mean(num_array[:, 0]), np.mean(num_array[:, 1])
# 1. Center the samples (subtract the per-feature mean)
# new_num_array = np.array(list(zip(num_array[:, 0] - n1_avg, num_array[:, 1] - n2_avg)))
new_num_array = np.c_[num_array[:, 0] - n1_avg, num_array[:, 1] - n2_avg]
# 2. Compute the covariance matrix
num_cov = np.cov(new_num_array[:, 0], new_num_array[:, 1])
# 3. Eigendecomposition: a holds the eigenvalues, b the eigenvectors (as columns)
a, b = np.linalg.eig(num_cov)
# k = 1: keep the column of b belonging to the largest eigenvalue
w = b[:, np.argmax(a)]
# 4. Project the centered data onto w to get the 1-D PCA result (w is 1-D, so no transpose needed)
z1_num = new_num_array.dot(w)
print(z1_num)
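As a quick side check (an addition, not part of the original listing), the eigenvalues already tell how much of the total variance the retained component carries:

# Addition for illustration: fraction of total variance explained by the kept component
# (roughly 0.96 for this data set).
print(a.max() / a.sum())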
# For comparison, the same reduction using PCA from sklearn
from sklearn.decomposition import PCA

pca = PCA(n_components=1)
z2_num = pca.fit_transform(num_array)  # PCA centers the data internally
print(z2_num)
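The sign of an eigenvector is arbitrary, so z1_num and z2_num may differ by a factor of -1. A minimal sketch (the comparison below is an addition, not in the original post) confirms the two results agree up to sign and reconstruct the same points:

# Addition: compare the hand-rolled result with sklearn's, up to sign.
print(np.allclose(np.abs(z1_num), np.abs(z2_num.ravel())))   # expected: True

# Both projections reconstruct the same 2-D approximation of the data.
recon_manual = np.outer(z1_num, w) + np.array([n1_avg, n2_avg])
recon_sklearn = pca.inverse_transform(z2_num)
print(np.allclose(recon_manual, recon_sklearn))               # expected: True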
Original article: https://www.cnblogs.com/laresh/p/7622130.html