PCA Dimensionality Reduction with C Code

Reposted from: http://blog.csdn.net/u011001084/article/details/51363892

Reposted from: http://blog.csdn.net/jinshengtao/article/details/18599165


PCA for Face Recognition

        The procedure for applying PCA to face recognition is as follows:

        1. Suppose we have 400 images of size 100*100; they form a 10000*400 matrix X=[x_1,\dots,x_n].

        2. Compute the mean \mu=\frac{1}{n}\sum_{j=1}^n x_j and let H=\frac{1}{\sqrt{n-1}}[x_1-\mu,\dots,x_n-\mu].

        3. By definition, the covariance matrix is \Sigma=HH^T.

        4. Compute the eigenvalues and eigenvectors of \Sigma, and take the eigenvectors corresponding to the h largest eigenvalues to form the matrix \Phi.

        5. \Phi reduces the dimensionality of the data: \Phi^T X=Y, where Y is an h-by-400 matrix, i.e. the data are reduced from 10000 dimensions to h dimensions.

        An obvious drawback of this approach is that \Sigma is 10000×10000, so computing its eigendecomposition directly is very expensive. One remedy is to use a QR decomposition to carry out the singular value decomposition indirectly, which reduces the amount of computation.
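
        It is worth noting that the C implementation later in this post does not take the QR route; it relies instead on the classic eigenface trick of diagonalizing the small n*n matrix H^T H (the L = T'*T matrix built in calc_covariance_matrix below) rather than the d*d matrix \Sigma=HH^T. The justification, in the notation above: if H^T H v=\lambda v with v\neq 0, then HH^T(Hv)=H(H^T H v)=\lambda(Hv), so Hv is an eigenvector of \Sigma with the same eigenvalue \lambda. This is exactly what get_eigenface does when it multiplies the selected eigenvectors of L by the mean-subtracted data matrix T.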

Reducing the Computation with QR Decomposition

        The steps of the QR-based PCA algorithm are as follows:

        1. We have \Sigma=HH^T, where \Sigma is d*d and H is d*n; d is the dimensionality of the raw data, n is the number of samples, and d is much larger than n.

        2. Compute a QR decomposition H=QR, where Q is d*t and R is t*n, with 1\leq t \leq n.

        3. Then \Sigma=QRR^T Q^T. Compute the singular value decomposition R^T=UDV^T, where U is n*t, V is t*t, and D=diag(\sigma_1,\dots,\sigma_t).

        4. Hence \Sigma=QVDU^T UDV^T Q^T=QVD^2 V^T Q^T=QV\Lambda V^T Q^T, where \Lambda=D^2.

        5. Since (QV)^T (QV)=V^T Q^T QV=V^T V=I, QV diagonalizes \Sigma: QV is the eigenvector matrix of \Sigma and \Lambda is its eigenvalue matrix.

        6. Take the h columns of V that correspond to the h largest diagonal entries of D to form the t*h matrix V_h; the dimensionality-reduction matrix is then \Phi=QV_h. A sketch of step 2 is given after this list.
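
        Step 2, the thin QR factorization of a tall d*n matrix, is easy to sketch with modified Gram-Schmidt (the QR-based variant itself is not implemented in the code of this post). The following is only a minimal sketch, taking t=n and assuming the columns of H are linearly independent; the function name qr_mgs and the row-major layout are choices made here, not something from the original posts.

#include <math.h>
#include <string.h>

/* Thin QR of a tall d x n matrix H (d >> n) by modified Gram-Schmidt.      */
/* H and Q are row-major d*n arrays, R is a row-major n*n array, so H = QR. */
/* Assumes the columns of H are linearly independent (t = n).               */
void qr_mgs(const double *H, double *Q, double *R, int d, int n)
{
    int i, j, k;

    memcpy(Q, H, sizeof(double) * d * n);    /* start from the columns of H */
    memset(R, 0, sizeof(double) * n * n);

    for (j = 0; j < n; j++)
    {
        double norm = 0.0;
        for (i = 0; i < d; i++)              /* R[j][j] = ||column j||      */
            norm += Q[i * n + j] * Q[i * n + j];
        R[j * n + j] = sqrt(norm);
        for (i = 0; i < d; i++)              /* normalize column j          */
            Q[i * n + j] /= R[j * n + j];
        for (k = j + 1; k < n; k++)          /* orthogonalize the rest      */
        {
            double dot = 0.0;
            for (i = 0; i < d; i++)
                dot += Q[i * n + j] * Q[i * n + k];
            R[j * n + k] = dot;
            for (i = 0; i < d; i++)
                Q[i * n + k] -= dot * Q[i * n + j];
        }
    }
}

        The singular value decomposition of R^T in step 3 is then an n*n-sized problem (solved, for example, with a library routine) instead of a decomposition of the d*d matrix \Sigma.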

1. Load the data, compute the mean, and build the covariance matrix
void load_data(double *T,IplImage *src,int k)  
{  
    int i,j;  
  
    //flatten each image into a column vector and store it in column k-1 of T
    for (i=0;i<IMG_HEIGHT;i++)  
    {  
        for (j=0;j<IMG_WIDTH;j++)  
        {  
            T[(i*IMG_WIDTH+j)*TRAIN_NUM+k-1]= (double)(unsigned char)src->imageData[i*IMG_WIDTH+j];  
        }  
    }  
}  
void calc_mean(double *T,double *m)  
{  
    int i,j;  
    double temp;  
  
    for (i=0;i<IMG_WIDTH*IMG_HEIGHT;i++)  
    {  
        temp=0;  
        for (j=0;j<TRAIN_NUM;j++)  
        {  
            temp = temp + T[i*TRAIN_NUM+j];  
        }  
        m[i] = temp/TRAIN_NUM;  
    }  
}  
  
void calc_covariance_matrix(double *T,double *L,double *m)  
{  
    int i,j,k;  
    double *T1;  
  
    //T = T -m  
    for (i=0;i<IMG_WIDTH*IMG_HEIGHT;i++)  
    {  
        for (j=0;j<TRAIN_NUM;j++)  
        {  
            T[i*TRAIN_NUM+j] = T[i*TRAIN_NUM+j] - m[i];  
        }  
    }  
  
    T1 = (double *)malloc(sizeof(double)*IMG_HEIGHT*IMG_WIDTH*TRAIN_NUM);  
  
    //L = T' * T  
    matrix_reverse(T,T1,IMG_WIDTH*IMG_HEIGHT,TRAIN_NUM);  
    matrix_mutil(L,T1,T,TRAIN_NUM,IMG_HEIGHT*IMG_WIDTH,TRAIN_NUM);  
  
    free(T1);  
}  

2. Compute the eigenvalues and eigenvectors of the generating matrix, select the suitable ones, and build the transformation matrix to the feature subspace.

//Reduces the real symmetric matrix a (n x n) to a symmetric tridiagonal matrix
//using Householder transformations: on return, q holds the accumulated
//orthogonal transformation, b the diagonal elements and c the sub-diagonal elements.
void cstrq(double a[],int n,double q[],double b[],double c[])
{
    int i,j,k,u,v;
    double h,f,g,h2;

    for (i=0; i<=n-1; i++)
        for (j=0; j<=n-1; j++)
        {
            u=i*n+j; q[u]=a[u];
        }
    for (i=n-1; i>=1; i--)
    {
        h=0.0;
        if (i>1)
            for (k=0; k<=i-1; k++)
            {
                u=i*n+k; h=h+q[u]*q[u];
            }
        if (h+1.0==1.0)
        {
            c[i]=0.0;
            if (i==1) c[i]=q[i*n+i-1];
            b[i]=0.0;
        }
        else
        {
            c[i]=sqrt(h);
            u=i*n+i-1;
            if (q[u]>0.0) c[i]=-c[i];
            h=h-q[u]*c[i];
            q[u]=q[u]-c[i];
            f=0.0;
            for (j=0; j<=i-1; j++)
            {
                q[j*n+i]=q[i*n+j]/h;
                g=0.0;
                for (k=0; k<=j; k++)
                    g=g+q[j*n+k]*q[i*n+k];
                if (j+1<=i-1)
                    for (k=j+1; k<=i-1; k++)
                        g=g+q[k*n+j]*q[i*n+k];
                c[j]=g/h;
                f=f+g*q[j*n+i];
            }
            h2=f/(h+h);
            for (j=0; j<=i-1; j++)
            {
                f=q[i*n+j];
                g=c[j]-h2*f;
                c[j]=g;
                for (k=0; k<=j; k++)
                {
                    u=j*n+k;
                    q[u]=q[u]-f*c[k]-g*q[i*n+k];
                }
            }
            b[i]=h;
        }
    }
    for (i=0; i<=n-2; i++) c[i]=c[i+1];
    c[n-1]=0.0;
    b[0]=0.0;
    for (i=0; i<=n-1; i++)
    {
        if ((b[i]!=0.0)&&(i-1>=0))
            for (j=0; j<=i-1; j++)
            {
                g=0.0;
                for (k=0; k<=i-1; k++)
                    g=g+q[i*n+k]*q[k*n+j];
                for (k=0; k<=i-1; k++)
                {
                    u=k*n+j;
                    q[u]=q[u]-g*q[k*n+i];
                }
            }
        u=i*n+i;
        b[i]=q[u]; q[u]=1.0;
        if (i-1>=0)
            for (j=0; j<=i-1; j++)
            {
                q[i*n+j]=0.0; q[j*n+i]=0.0;
            }
    }
    return;
}
  
//Iteratively computes all eigenvalues and eigenvectors of the symmetric tridiagonal
//matrix produced by cstrq. On entry b holds the diagonal, c the sub-diagonal and q the
//transformation matrix from cstrq; on return b holds the eigenvalues and column j of q
//is the eigenvector for b[j]. eps is the tolerance, l the maximum number of iterations.
//Returns 1 on success, -1 on failure.
int csstq(int n,double b[],double c[],double q[],double eps,int l)
{
    int i,j,k,m,it,u,v;
    double d,f,h,g,p,r,e,s;

    c[n-1]=0.0; d=0.0; f=0.0;
    for (j=0; j<=n-1; j++)
    {
        it=0;
        h=eps*(fabs(b[j])+fabs(c[j]));
        if (h>d) d=h;
        m=j;
        while ((m<=n-1)&&(fabs(c[m])>d)) m=m+1;
        if (m!=j)
        {
            do
            {
                if (it==l)
                {
                    printf("fail\n");
                    return(-1);
                }
                it=it+1;
                g=b[j];
                p=(b[j+1]-g)/(2.0*c[j]);
                r=sqrt(p*p+1.0);
                if (p>=0.0) b[j]=c[j]/(p+r);
                else b[j]=c[j]/(p-r);
                h=g-b[j];
                for (i=j+1; i<=n-1; i++)
                    b[i]=b[i]-h;
                f=f+h; p=b[m]; e=1.0; s=0.0;
                for (i=m-1; i>=j; i--)
                {
                    g=e*c[i]; h=e*p;
                    if (fabs(p)>=fabs(c[i]))
                    {
                        e=c[i]/p; r=sqrt(e*e+1.0);
                        c[i+1]=s*p*r; s=e/r; e=1.0/r;
                    }
                    else
                    {
                        e=p/c[i]; r=sqrt(e*e+1.0);
                        c[i+1]=s*c[i]*r;
                        s=1.0/r; e=e/r;
                    }
                    p=e*b[i]-s*g;
                    b[i+1]=h+s*(e*g+s*b[i]);
                    for (k=0; k<=n-1; k++)
                    {
                        u=k*n+i+1; v=u-1;
                        h=q[u]; q[u]=s*q[v]+e*h;
                        q[v]=e*q[v]-s*h;
                    }
                }
                c[j]=s*p; b[j]=e*p;
            }
            while (fabs(c[j])>d);
        }
        b[j]=b[j]+f;
    }
    for (i=0; i<=n-1; i++)
    {
        k=i; p=b[i];
        if (i+1<=n-1)
        {
            j=i+1;
            while ((j<=n-1)&&(b[j]<=p))
            {
                k=j; p=b[j]; j=j+1;
            }
        }
        if (k!=i)
        {
            b[k]=b[i]; b[i]=p;
            for (j=0; j<=n-1; j++)
            {
                u=j*n+i; v=j*n+k;
                p=q[u]; q[u]=q[v]; q[v]=p;
            }
        }
    }
    return(1);
}
  
void matrix_reverse(double *src,double *dest,int row,int col)   //matrix transpose: dest (col x row) = src' (src is row x col)
{
    int i,j;

    for(i = 0;i < col;i++)
    {
        for(j = 0;j < row;j++)
        {
            dest[i * row + j] = src[j * col + i];
        }
    }
}
  
void matrix_mutil(double *c,double *a,double *b,int x,int y,int z)  //matrix multiplication: c = a*b, a is x*y, b is y*z
{
    int i,j,k;
    for (i=0;i<x;i++)
    {
        for (k=0;k<z;k++)
        {
            c[i*z+k] = 0;   //initialize the output element (the original relied on the caller zeroing c, which get_eigenface did not do)
            for (j=0;j<y;j++)
            {
                c[i*z+k] += a[i*y+j]*b[j*z+k];
            }
        }
    }
}
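
As a quick sanity check of the row-major conventions these two helpers use (a standalone test program written for this post, not part of the original code; compile it together with matrix_reverse and matrix_mutil only):

#include <stdio.h>

void matrix_reverse(double *src,double *dest,int row,int col);
void matrix_mutil(double *c,double *a,double *b,int x,int y,int z);

int main(void)
{
    double a[2*3] = {1, 2, 3,
                     4, 5, 6};        /* 2x3 matrix                   */
    double at[3*2];                   /* will hold the 3x2 transpose  */
    double g[2*2];                    /* will hold a * a'             */

    matrix_reverse(a, at, 2, 3);      /* at = a'                      */
    matrix_mutil(g, a, at, 2, 3, 2);  /* g = a*a' = [[14,32],[32,77]] */

    printf("%.0f %.0f\n%.0f %.0f\n", g[0], g[1], g[2], g[3]);
    return 0;
}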

3. Select the suitable eigenvalues and eigenvectors; in practice this simply means keeping those whose eigenvalue is greater than 1.

void pick_eignevalue(double *b,double *q,double *p_q,int num_q)
{
    int i,j,k;

    k=0;    //current column of p_q
    for (i=0;i<TRAIN_NUM;i++)           //scan the columns of q
    {
        if (b[i]>1)                     //keep eigenvectors whose eigenvalue is greater than 1
        {
            for (j=0;j<TRAIN_NUM;j++)   //copy the rows of this column
            {
                p_q[j*num_q+k] = q[j*TRAIN_NUM+i];  //read column i of q, store it as column k of p_q
            }
            k++;
        }
    }
}

4. Code for computing the eigenvectors of Q (the eigenfaces) and projecting the training set onto the feature subspace

void get_eigenface(double *p_q,double *T,int num_q,double *projected_train,double *eigenvector)
{
    double *temp;
    int i,j,k;
    //IplImage *projected;
    //char res[20]={0}; //file name

    memset(eigenvector,0,sizeof(double)*IMG_HEIGHT*IMG_WIDTH*num_q);
    memset(projected_train,0,sizeof(double)*TRAIN_NUM*num_q);

    //eigenvectors of the full covariance matrix (the eigenfaces): temp = T * p_q
    temp = (double *)malloc(sizeof(double)*IMG_HEIGHT*IMG_WIDTH*num_q);
    matrix_mutil(temp,T,p_q,IMG_HEIGHT*IMG_WIDTH,TRAIN_NUM,num_q);

    //optionally save the eigenfaces as images (disabled in the original post)
    /*projected = cvCreateImage(cvSize(IMG_WIDTH,IMG_HEIGHT),IPL_DEPTH_8U,1); 
    for (i=0;i<num_q;i++) 
    { 
        sprintf(res,"%d.jpg",i); 
        for (j=0;j<IMG_HEIGHT;j++) 
        { 
            for (k=0;k<IMG_WIDTH;k++) 
            { 
                projected->imageData[j*IMG_WIDTH+k] = (unsigned char)abs(temp[(j*IMG_WIDTH+k)*num_q+i]); 
            } 
        } 
        cvSaveImage(res,projected); 
    }*/

    //project the mean-subtracted training samples onto the subspace
    matrix_reverse(temp,eigenvector,IMG_WIDTH*IMG_HEIGHT,num_q);
    matrix_mutil(projected_train,eigenvector,T,num_q,IMG_WIDTH*IMG_HEIGHT,TRAIN_NUM);
    free(temp);
}
#include "stdafx.h"  
#include "Process.h"  
#include "My_Matrix.h"  
  
int _tmain(int argc, _TCHAR* argv[])  
{  
    double *T,*L,*m,*b,*q,*c,*p_q,*projected_train,*T_test,*projected_test,*eigenvector,*Euc_dist;  
    double eps,temp;  
    int i,j,flag,iteration,num_q;  
    char res[20];  
    IplImage *tmp_img,*test_img;  
  
    T = (double *)malloc(sizeof(double)*IMG_HEIGHT*IMG_WIDTH*TRAIN_NUM);    //raw training data
    T_test = (double *)malloc(sizeof(double)*IMG_HEIGHT*IMG_WIDTH*1);       //test data
    m = (double *)malloc(sizeof(double)*IMG_HEIGHT*IMG_WIDTH);      //mean image
    L = (double *)malloc(sizeof(double)*TRAIN_NUM*TRAIN_NUM);       //L = T'*T, the small covariance (Gram) matrix
    b = (double *)malloc(sizeof(double)*TRAIN_NUM);             //eigenvalues of L
    q = (double *)malloc(sizeof(double)*TRAIN_NUM*TRAIN_NUM);   //eigenvectors of L
    c = (double *)malloc(sizeof(double)*TRAIN_NUM);             //sub-diagonal elements of the real symmetric tridiagonal matrix
  
    eps = 0.000001;  
    memset(L,0,sizeof(double)*TRAIN_NUM*TRAIN_NUM);  
      
    //load the training images into the columns of T
    for (i=1;i<=TRAIN_NUM;i++)  
    {  
        sprintf(res,".\\TrainDatabase\\%d.jpg",i);  
        tmp_img = cvLoadImage(res,CV_LOAD_IMAGE_GRAYSCALE);  
        load_data(T,tmp_img,i);  
    }  
      
    //mean of each row of T (the mean image)
    calc_mean(T,m);  
  
    //build the covariance matrix L = T'*T
    calc_covariance_matrix(T,L,m);  
  
    //eigenvalues and eigenvectors of L
    iteration = 60;  
    cstrq(L,TRAIN_NUM,q,b,c);  
    flag = csstq(TRAIN_NUM,b,c,q,eps,iteration); //column j of q is the eigenvector corresponding to eigenvalue b[j]
    if (flag<0)
    {
        printf("failed to compute the eigenvalues and eigenvectors\n");
    }
    else
    {
        printf("successfully computed the eigenvalues and eigenvectors\n");
    }
  
    //keep only the suitable eigenvalues of L and filter the eigenvectors accordingly
    num_q=0;  
    for (i=0;i<TRAIN_NUM;i++)  
    {  
        if (b[i]>1)  
        {  
            num_q++;  
        }  
    }  
    p_q = (double *)malloc(sizeof(double)*TRAIN_NUM*TRAIN_NUM);         //selected eigenvectors of L (filtered only, not sorted)
    projected_train = (double *)malloc(sizeof(double)*TRAIN_NUM*num_q); //training samples projected into the feature space
    eigenvector = (double *)malloc(sizeof(double)*IMG_HEIGHT*IMG_WIDTH*num_q);//Pe=λe implies Q(Xe)=λ(Xe); the projection basis vectors
    pick_eignevalue(b,q,p_q,num_q);  
    get_eigenface(p_q,T,num_q,projected_train,eigenvector);  
  
    //load the test image
    test_img = cvLoadImage(".\\TestDatabase\\4.jpg",CV_LOAD_IMAGE_GRAYSCALE);  
    projected_test = (double *)malloc(sizeof(double)*num_q*1);//test sample projected into the feature space
    for (i=0;i<IMG_HEIGHT;i++)  
    {  
        for (j=0;j<IMG_WIDTH;j++)  
        {  
            T_test[i*IMG_WIDTH+j] = (double)(unsigned char)test_img->imageData[i*IMG_WIDTH+j] - m[i*IMG_WIDTH+j];  
        }  
    }  
  
    //project the test data into the feature space
    memset(projected_test,0,sizeof(double)*num_q);  
    matrix_mutil(projected_test,eigenvector,T_test,num_q,IMG_WIDTH*IMG_HEIGHT,1);  
    return 0;  
}  
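
The main program stops after projecting the test image into the feature space; the Euc_dist pointer is declared but never used, and the matching step itself is missing. A minimal sketch of that final step (find_nearest is a hypothetical helper introduced here, not part of the original code): compare the projected test image with each column of projected_train by squared Euclidean distance and pick the closest training image.

#include <float.h>

/* Returns the 0-based index of the training image whose projection is       */
/* closest (in squared Euclidean distance) to the projected test image.      */
/* projected_train is num_q x train_num (row-major); projected_test is       */
/* num_q x 1, i.e. one value per retained eigenface.                         */
int find_nearest(const double *projected_train, const double *projected_test,
                 int num_q, int train_num)
{
    int i, j, best = 0;
    double best_dist = DBL_MAX;

    for (j = 0; j < train_num; j++)
    {
        double dist = 0.0;
        for (i = 0; i < num_q; i++)
        {
            double diff = projected_train[i * train_num + j] - projected_test[i];
            dist += diff * diff;
        }
        if (dist < best_dist)
        {
            best_dist = dist;
            best = j;
        }
    }
    return best;
}

It would be called just before return 0; in main, for example as printf("closest match: %d.jpg\n", find_nearest(projected_train, projected_test, num_q, TRAIN_NUM) + 1);, since the training images are named 1.jpg through TRAIN_NUM.jpg.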

