Implementing PCA Step by Step in Python


Requirements: Python environment setup: http://blog.csdn.net/luzhangting/article/details/61414485 ; PCA theory: http://blog.csdn.net/zhongkelee/article/details/44064401
Step 1: Generate three-dimensional sample data. We draw 40 three-dimensional points, split into two classes of 20 samples each. Class 1: mean [0,0,0], covariance matrix [[1,0,0],[0,1,0],[0,0,1]]. Class 2: mean [1,1,1], covariance matrix [[1,0,0],[0,1,0],[0,0,1]].
import numpy as np

np.random.seed(1)  # fix the seed so the results are reproducible

# class 1: 20 samples from N([0,0,0], I), stored as a 3x20 matrix (one sample per column)
mu_vec1 = np.array([0,0,0])
cov_mat1 = np.array([[1,0,0],[0,1,0],[0,0,1]])
class1_sample = np.random.multivariate_normal(mu_vec1, cov_mat1, 20).T
assert class1_sample.shape == (3,20), "The matrix does not have dimensions 3x20"

# class 2: 20 samples from N([1,1,1], I)
mu_vec2 = np.array([1,1,1])
cov_mat2 = np.array([[1,0,0],[0,1,0],[0,0,1]])
class2_sample = np.random.multivariate_normal(mu_vec2, cov_mat2, 20).T
assert class2_sample.shape == (3,20), "The matrix does not have dimensions 3x20"
Let's plot the data to check that it was generated properly.
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection

fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
plt.rcParams['legend.fontsize'] = 10   
ax.plot(class1_sample[0,:], class1_sample[1,:], class1_sample[2,:], 'o', markersize=8, color='blue', alpha=0.5, label='class1')
ax.plot(class2_sample[0,:], class2_sample[1,:], class2_sample[2,:], '^', markersize=8, alpha=0.5, color='red', label='class2')

plt.title('Samples for class 1 and class 2')
ax.legend(loc='upper right')

plt.show()
(Figure 1: samples for class 1 and class 2)
Next, combine the two classes into a single data set. PCA ignores class labels entirely, which is one of its limitations as a dimensionality-reduction method.
all_samples = np.concatenate((class1_sample, class2_sample), axis=1)
assert all_samples.shape == (3,40), "The matrix does not have dimensions 3x40"
Step 2: Compute the mean vector.
mean_x = np.mean(all_samples[0,:])
mean_y = np.mean(all_samples[1,:])
mean_z = np.mean(all_samples[2,:])

mean_vector = np.array([[mean_x],[mean_y],[mean_z]])

print('Mean Vector:\n', mean_vector)
Mean Vector:
 [[ 0.41667492]
 [ 0.69848315]
 [ 0.49242335]]
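As an aside, the three per-coordinate means can be computed in a single call; an equivalent one-liner using NumPy's axis argument:

# equivalent: average over the 40 columns and keep a 3x1 column shape
mean_vector = all_samples.mean(axis=1).reshape(3,1)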
Step 3: Compute the scatter matrix and the covariance matrix.
scatter_matrix = np.zeros((3,3))
for i in range(all_samples.shape[1]):
    scatter_matrix += (all_samples[:,i].reshape(3,1) - mean_vector).dot((all_samples[:,i].reshape(3,1) - mean_vector).T)
print('Scatter Matrix:\n', scatter_matrix)
Scatter Matrix:
 [[ 38.4878051   10.50787213  11.13746016]
 [ 10.50787213  36.23651274  11.96598642]
 [ 11.13746016  11.96598642  49.73596619]]
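The explicit loop above is easy to read, but the same scatter matrix can be built in one vectorized step. A minimal equivalent sketch, using the variables already defined:

# vectorized form: center the columns, then take the sum of outer products
centered = all_samples - mean_vector
scatter_matrix_vec = centered.dot(centered.T)
assert np.allclose(scatter_matrix, scatter_matrix_vec)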
cov_mat = np.cov([all_samples[0,:],all_samples[1,:],all_samples[2,:]])
print('Covariance Matrix:\n', cov_mat)
Covariance Matrix:
 [[ 0.9868668   0.26943262  0.2855759 ]
 [ 0.26943262  0.92914135  0.30682016]
 [ 0.2855759   0.30682016  1.27528118]]
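With N = 40 samples, the scatter matrix and the covariance matrix differ only by the constant factor N - 1 = 39 (np.cov divides by N - 1 by default), so either one yields the same eigenvectors. A quick check:

# the scatter matrix is just an unscaled covariance matrix: S = (N - 1) * Cov
assert np.allclose(scatter_matrix, 39 * cov_mat)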
Step 4: Compute the eigenvalues and eigenvectors.
eig_val_sc, eig_vec_sc = np.linalg.eig(scatter_matrix)

eig_val_cov, eig_vec_cov = np.linalg.eig(cov_mat)

for i in range(len(eig_val_sc)):
    eigvec_sc = eig_vec_sc[:,i].reshape(3,1)
    eigvec_cov = eig_vec_cov[:,i].reshape(3,1)
    # the two matrices differ only by a scalar factor, so the eigenvectors match
    assert np.allclose(eigvec_sc, eigvec_cov), 'Eigenvectors are not identical'

    print('Eigenvector {}:\n{}'.format(i+1, eigvec_sc))
    print('Eigenvalue {} from scatter matrix: {}'.format(i+1, eig_val_sc[i]))
    print('Eigenvalue {} from covariance matrix: {}'.format(i+1, eig_val_cov[i]))
    print('Scaling factor: ', eig_val_sc[i]/eig_val_cov[i])
    print(40 * '-')
Eigenvector 1:
[[-0.49210223]
 [-0.47927902]
 [-0.72672348]]
Eigenvalue 1 from scatter matrix: 65.1693677908
Eigenvalue 1 from covariance matrix: 1.67100943053
Scaling factor:  39.0
----------------------------------------
Eigenvector 2:
[[-0.64670286]
 [-0.35756937]
 [ 0.67373552]]
Eigenvalue 2 from scatter matrix: 32.6947129632
Eigenvalue 2 from covariance matrix: 0.838325973416
Scaling factor:  39.0
----------------------------------------
Eigenvector 3:
[[ 0.58276136]
 [-0.80152095]
 [ 0.13399043]]
Eigenvalue 3 from scatter matrix: 26.596203221
Eigenvalue 3 from covariance matrix: 0.681953928744
Scaling factor:  39.0
----------------------------------------
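As a sanity check on the decomposition, every pair should satisfy the defining equation S v = λ v. A minimal check against the scatter matrix:

# verify the eigenvector equation S v = lambda v for each pair
for i in range(len(eig_val_sc)):
    v = eig_vec_sc[:, i]
    assert np.allclose(scatter_matrix.dot(v), eig_val_sc[i] * v)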
Out of curiosity, let's plot the eigenvectors as well.
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
from matplotlib.patches import FancyArrowPatch


class Arrow3D(FancyArrowPatch):
    # Helper that projects a FancyArrowPatch from 3D into the 2D view plane.
    # Note: this recipe relies on overriding draw(), which only works on older
    # matplotlib releases (see the replacement sketch after the plot below).
    def __init__(self, xs, ys, zs, *args, **kwargs):
        FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
        self._verts3d = xs, ys, zs

    def draw(self, renderer):
        # project the stored 3D endpoints into 2D, then draw as a normal patch
        xs3d, ys3d, zs3d = self._verts3d
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
        FancyArrowPatch.draw(self, renderer)

fig = plt.figure(figsize=(7,7))
ax = fig.add_subplot(111, projection='3d')

ax.plot(all_samples[0,:], all_samples[1,:], all_samples[2,:], 'o', markersize=8, color='green', alpha=0.2)
ax.plot([mean_x], [mean_y], [mean_z], 'o', markersize=10, color='red', alpha=0.5)
for v in eig_vec_sc.T:
    # draw each eigenvector as an arrow rooted at the sample mean
    a = Arrow3D([mean_x, mean_x + v[0]], [mean_y, mean_y + v[1]], [mean_z, mean_z + v[2]], mutation_scale=20, lw=3, arrowstyle="-|>", color="r")
    ax.add_artist(a)
ax.set_xlabel('x_values')
ax.set_ylabel('y_values')
ax.set_zlabel('z_values')

plt.title('Eigenvectors')

plt.show()
(Figure 2: eigenvectors drawn at the sample mean)
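On matplotlib 3.5 and later, the draw() override above is no longer invoked for 3D artists, so the arrows break. A minimal replacement sketch for newer releases, assuming the same imports:

class Arrow3D(FancyArrowPatch):
    def __init__(self, xs, ys, zs, *args, **kwargs):
        super().__init__((0, 0), (0, 0), *args, **kwargs)
        self._verts3d = xs, ys, zs

    def do_3d_projection(self, renderer=None):
        # matplotlib >= 3.5 calls do_3d_projection() instead of draw()
        xs3d, ys3d, zs3d = self._verts3d
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, self.axes.M)
        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
        return np.min(zs)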
Step 5: Sort the eigenvectors by their eigenvalues in decreasing order and pick the top k.
# pair each eigenvalue (absolute value) with its eigenvector
eig_pairs = [(np.abs(eig_val_sc[i]), eig_vec_sc[:,i]) for i in range(len(eig_val_sc))]

# sort the pairs from the highest to the lowest eigenvalue
eig_pairs.sort(key=lambda x: x[0], reverse=True)

for i in eig_pairs:
    print(i[0])
65.1693677908
32.6947129632
26.596203221
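How many components are worth keeping? A common check is the fraction of the total variance each component explains; a small sketch using the sorted pairs:

# fraction of the total variance captured by each component
total = sum(eig_val_sc)
var_explained = [val / total for val, vec in eig_pairs]
print(var_explained)          # roughly [0.52, 0.26, 0.21] for this data
print(sum(var_explained[:2])) # the top two components keep about 79%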
In this example we reduce the data from three dimensions to two, so k = 2.
matrix_w = np.hstack((eig_pairs[0][1].reshape(3,1), eig_pairs[1][1].reshape(3,1)))
print('Matrix W:\n', matrix_w)
Matrix W:
 [[-0.49210223 -0.64670286]
 [-0.47927902 -0.35756937]
 [-0.72672348  0.67373552]]
Step 6: Project the data onto the new subspace.
transformed = matrix_w.T.dot(all_samples)
assert transformed.shape == (2,40), "The matrix does not have dimensions 2x40"

plt.plot(transformed[0,0:20], transformed[1,0:20], 'o', markersize=7, color='blue', alpha=0.5, label='class1')
plt.plot(transformed[0,20:40], transformed[1,20:40], '^', markersize=7, color='red', alpha=0.5, label='class2')
plt.xlim([-4,4])
plt.ylim([-4,4])
plt.xlabel('x_values')
plt.ylabel('y_values')
plt.legend()
plt.title('Transformed samples with class labels')

plt.show()
(Figure 3: transformed samples with class labels)
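Because the discarded third component carries the least variance, mapping the 2-D points back into 3-D recovers the data fairly well. A small round-trip sketch (note that, unlike step 6 above, we center the data before projecting so the error formula holds):

# project the centered data, then map it back: x_hat = W W^T (x - m) + m
centered = all_samples - mean_vector
reconstructed = matrix_w.dot(matrix_w.T.dot(centered)) + mean_vector
# the total squared error equals the discarded eigenvalue (about 26.6)
print(np.sum((all_samples - reconstructed)**2))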
Step 7: Alternative implementations. First, matplotlib.mlab.PCA(). (Note: mlab.PCA has since been deprecated and removed from matplotlib, so this code only runs on old matplotlib releases.)
from matplotlib.mlab import PCA as mlabPCA

mlab_pca = mlabPCA(all_samples.T)

print('PC axes in terms of the measurement axes scaled by the standard deviations:\n', mlab_pca.Wt)

plt.plot(mlab_pca.Y[0:20,0], mlab_pca.Y[0:20,1], 'o', markersize=7, color='blue', alpha=0.5, label='class1')
plt.plot(mlab_pca.Y[20:40,0], mlab_pca.Y[20:40,1], '^', markersize=7, color='red', alpha=0.5, label='class2')
plt.xlabel('x_values')
plt.ylabel('y_values')
plt.xlim([-4,4])
plt.ylim([-4,4])
plt.legend()
plt.title('Transformed samples with class labels from matplotlib.mlab.PCA()')
plt.show()
PC axes in terms of the measurement axes scaled by the standard deviations:
 [[-0.57087338 -0.58973911 -0.5712367 ]
 [-0.71046744  0.006114    0.70370352]
 [ 0.41150894 -0.80757068  0.42248075]]
(Figure 4: transformed samples from matplotlib.mlab.PCA())
The result is slightly different from ours because mlab.PCA() standardizes the input (scales each variable to unit standard deviation) before computing the covariance matrix.
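To confirm that standardization is the only difference, we can redo the eigendecomposition on the correlation matrix, which is exactly the covariance matrix of standardized variables. A small sketch:

# PCA on standardized data = eigendecomposition of the correlation matrix
corr_mat = np.corrcoef(all_samples)
eig_val_corr, eig_vec_corr = np.linalg.eig(corr_mat)
print(eig_vec_corr)  # should match mlab_pca.Wt up to ordering and sign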
Second, sklearn.decomposition.PCA.
from sklearn.decomposition import PCA as sklearnPCA

sklearn_pca = sklearnPCA(n_components=2)
sklearn_transf = sklearn_pca.fit_transform(all_samples.T)
# the sign of a principal axis is arbitrary; flip it to match our earlier plot
sklearn_transf = sklearn_transf * (-1)

# sklearn.decomposition.PCA
plt.plot(sklearn_transf[0:20,0],sklearn_transf[0:20,1], 'o', markersize=7, color='blue', alpha=0.5, label='class1')
plt.plot(sklearn_transf[20:40,0], sklearn_transf[20:40,1], '^', markersize=7, color='red', alpha=0.5, label='class2')
plt.xlabel('x_values')
plt.ylabel('y_values')
plt.xlim([-4,4])
plt.ylim([-4,4])
plt.legend()
plt.title('Transformed samples via sklearn.decomposition.PCA')
plt.show()
(Figure 5: transformed samples via sklearn.decomposition.PCA)
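Unlike our manual step 6, sklearn's PCA centers the data (without standardizing it) before projecting, so a fair comparison must center as well. A quick check under that assumption:

# sklearn projects the mean-centered data; center ours before comparing
manual = matrix_w.T.dot(all_samples - mean_vector).T
# agreement up to the arbitrary sign of each principal axis
print(np.allclose(np.abs(manual), np.abs(sklearn_transf)))  # expected: True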