An Algorithm Beginner's First Attempt --- Dimensionality Reduction with KPCA (Kernel Principal Component Analysis) [A Worked Comparison of PCA, LDA, and KPCA]

--------------------------------------------------------------------------
The author focuses on algorithm implementation rather than lengthy theoretical exposition. For the theory behind KPCA, this blog post is recommended:
https://blog.csdn.net/zjuPeco/article/details/77510981

For PCA dimensionality reduction, see the author's earlier post:
https://blog.csdn.net/Java_Man_China/article/details/89331554

For LDA dimensionality reduction, see the author's earlier post:
https://blog.csdn.net/Java_Man_China/article/details/89504514
--------------------------------------------------------------------------
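In brief, the implementation below follows the standard KPCA recipe: compute the RBF kernel matrix over all N samples, center it, and keep the eigenvectors belonging to the k largest eigenvalues:

    K(i, j) = exp(-gama * ||x_i - x_j||^2)
    K' = K - 1N*K - K*1N + 1N*K*1N

where 1N denotes the N x N matrix whose every entry is 1/N. The selected eigenvectors of the centered matrix K' themselves serve as the low-dimensional coordinates of the training samples.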
import breeze.linalg.{DenseMatrix, DenseVector, eig}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.feature.{LabeledPoint, StandardScaler, VectorAssembler}
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.sql.types.{DoubleType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import scala.collection.mutable.ArrayBuffer

/** PCA and LDA can reduce the dimensionality of linearly separable datasets, but for
  * non-linear datasets a kernel function is needed, so this code shows how to use KPCA
  * to handle non-linear data.
  * Data Source : http://archive.ics.uci.edu/ml/datasets/Wine
  * @author XiaoTangBao
  * @date 2019/4/29 14:04
  * @version 1.0
  */
object KPCA {
  def main(args: Array[String]): Unit = {
    // suppress logging
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
    val spark = SparkSession.builder().master("local[4]").appName("KPCA").getOrCreate()
    val data = spark.sparkContext.textFile("G:\\mldata\\kpca_test.txt").map(str => str.split(','))
      .map(arr => arr.map(str => str.toDouble)).map(arr => Row(arr(0),arr(1),arr(2)))
    // define the schema and the feature columns
    val schema = StructType(List(StructField("label",DoubleType,true),StructField("x1",DoubleType,true),StructField("x2",DoubleType,true)))
    val featuresArray = Array("x1","x2")
    val df = spark.createDataFrame(data,schema)
    // assembler that packs the feature columns into a single vector column
    val va = new VectorAssembler().setInputCols(featuresArray).setOutputCol("features")
    val ndf = va.transform(df).select("label","features")
    // RBF kernel parameter
    val gama = 15.0
    // target dimensionality after reduction
    val dim = 2
    val n1 = run(ndf, dim, gama)
    // collect the projected coordinates: row 0 of n1 holds the first component, row 1 the second
    val arr = ArrayBuffer[(Double,Double)]()
    for(i <- 0 until n1.cols) arr.append((n1(0,i), n1(1,i)))
    // print the first component of every sample, then the second
    arr.foreach(tp => println(tp._1))
    println("**************")
    arr.foreach(tp => println(tp._2))
  }

  /**
    * Attempts to reduce the dimensionality of the data via the RBF kernel.
    * @param df the original high-dimensional data; each row holds one record
    * @param k the target number of dimensions
    * @param gama the single parameter of the RBF kernel
    */
  def run(df: DataFrame, k: Int, gama: Double): DenseMatrix[Double] = {
    // standardize the data; once standardized, no separate mean-centering is needed
    val stdf = new StandardScaler().setInputCol("features").setOutputCol("Scaledfeatures")
      .setWithMean(true).setWithStd(true).fit(df).transform(df)
      .select("label","Scaledfeatures")
      .withColumnRenamed("Scaledfeatures","features")

    // pull the scaled feature vectors and labels back to the driver
    val trainData = stdf.select("features").rdd
      .map(row => row.getAs[Vector](0).toArray)
      .collect()

    val labels = stdf.select("label").rdd
      .map(row => row.getDouble(0))
      .collect()

    // number of feature columns
    val tzz = trainData(0).length

    // rebuild the data as labeled points
    val labArr = ArrayBuffer[LabeledPoint]()
    for (i <- 0 until trainData.length) labArr.append(LabeledPoint(labels(i), Vectors.dense(trainData(i))))

    // assemble all samples into one big matrix (Breeze is column-major, so each sample becomes one column)
    val allData = labArr.map(lab => lab.features).map(vec => vec.toArray).flatMap(x => x).toArray
    val big_Matrx = new DenseMatrix[Double](tzz, trainData.length, allData)

    // compute the kernel (Gram) matrix of the samples
    var kMatrix = DenseMatrix.zeros[Double](big_Matrx.cols,big_Matrx.cols)
    for(i<-0 until kMatrix.rows){
      val vi = big_Matrx(::,i)
      for(j<-0 until kMatrix.cols){
        kMatrix(i,j) = rbf(vi,big_Matrx(::,j),gama)
      }
    }

    // center the kernel matrix: K' = K - 1N*K - K*1N + 1N*K*1N,
    // where 1N is the N x N matrix whose every entry is 1/N
    val LMatrix = DenseMatrix.zeros[Double](kMatrix.rows, kMatrix.cols)
    for(i <- 0 until LMatrix.cols) LMatrix(::,i) := 1.0 / kMatrix.rows
    kMatrix = kMatrix - LMatrix * kMatrix - kMatrix * LMatrix + LMatrix * kMatrix * LMatrix

    // eigen-decompose the centered kernel matrix (computed once and reused)
    val eigResult = eig(kMatrix)
    val eigValues = eigResult.eigenvalues
    // the returned eigenvectors are already normalized to unit length
    val eigVectors = eigResult.eigenvectors

    // pair each eigenvector (a column of eigVectors) with its eigenvalue, using the
    // eigenvalue as the "label" so the pairs can be sorted by eigenvalue below
    val da = ArrayBuffer[LabeledPoint]()
    for(i <- 0 until eigVectors.cols)
      da.append(LabeledPoint(eigValues(i), Vectors.dense(eigVectors(::,i).toArray)))

    // assuming this step is fine --- though I suspect something is still slightly off,
    // otherwise why would it differ from the Python result? One likely cause: scikit-learn's
    // KernelPCA scales each eigenvector by the square root of its eigenvalue when projecting
    // the training data, which this code does not do.
    // select the eigenvectors belonging to the k largest eigenvalues
    val result = da.sortBy(labPoint => labPoint.label).reverse.take(k).map(lab => lab.features).map(vec => vec.toArray)
    val rt = DenseMatrix.zeros[Double](result.length,result(0).length)
    for(i<-0 until rt.rows){
      for(j<-0 until rt.cols){
        rt(i,j) = result(i)(j)
      }
    }
    rt
  }
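  /** RBF (Gaussian) kernel: k(v1, v2) = exp(-gama * ||v1 - v2||^2). */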
  def rbf(v1: DenseVector[Double], v2: DenseVector[Double], gama: Double): Double = {
    val index_cof = (v1 - v2) dot (v1 - v2)
    val result = math.exp((-1.0) * gama * index_cof)
    result
  }
}
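For reference, the program expects G:\mldata\kpca_test.txt to contain comma-separated label,x1,x2 rows. Below is a minimal sketch for generating a non-linearly separable toy dataset in that format (the object name, sample counts, and the two-concentric-circles shape are my own assumptions for illustration, not taken from the original experiment):

import java.io.PrintWriter
import scala.util.Random

// hypothetical helper (illustration only): writes label,x1,x2 rows for the KPCA demo
object MakeKpcaTestData {
  def main(args: Array[String]): Unit = {
    val rnd = new Random(42)
    val pw = new PrintWriter("G:\\mldata\\kpca_test.txt")
    // two noisy concentric circles: label 0.0 on a small ring, label 1.0 on a large ring
    for (label <- Seq(0.0, 1.0); _ <- 1 to 100) {
      val radius = (if (label == 0.0) 1.0 else 3.0) + 0.1 * rnd.nextGaussian()
      val theta = 2.0 * math.Pi * rnd.nextDouble()
      pw.println(s"$label,${radius * math.cos(theta)},${radius * math.sin(theta)}")
    }
    pw.close()
  }
}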

Using KPCA, the wine data was reduced in dimensionality; the result is shown below:
[figure: KPCA projection (Scala implementation)]
Comparing against the result of calling the Python library, the two essentially agree:
[figure: KPCA projection (Python library)]

To contrast with linear dimensionality reduction, PCA and LDA were also applied to the same data; the results are shown below:
[figure: PCA projection]
[figure: LDA projection]
The experiments show that PCA and LDA perform poorly on non-linearly separable data, while KPCA reduces the dimensionality of non-linear data effectively.
