import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.feature.{MaxAbsScaler, MinMaxScaler, Normalizer, StandardScaler}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.SparkSession
/**
 * Spark ML data normalization: Normalizer, StandardScaler, MinMaxScaler and MaxAbsScaler.
 * @author XiaoTangBao
 * @date 2019/3/4 16:21
 * @version 1.0
 */
object Normalized {
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
    val sparkSession = SparkSession.builder().master("local[4]").appName("Normalize").getOrCreate()
    val df = sparkSession.createDataFrame(Seq(
      (1, Vectors.dense(1.0, 12.5, -108.0)),
      (2, Vectors.dense(2.5, 36.0, 198.0)),
      (3, Vectors.dense(6.8, 24.0, 459.0))))
      .toDF("id", "features")
    // Normalizer operates on each row, rescaling every row vector to unit norm
    // (here the L1 norm, since p = 1.0).
    val normalizer1 = new Normalizer()
      .setInputCol("features")
      .setOutputCol("normalfeatures")
      .setP(1.0)
    val l1Data = normalizer1.transform(df)
    l1Data.show(false)
    // Output:
    // +---+-----------------+------------------------------------------------------------+
    // |id |features         |normalfeatures                                              |
    // +---+-----------------+------------------------------------------------------------+
    // |1  |[1.0,12.5,-108.0]|[0.00823045267489712,0.102880658436214,-0.8888888888888888] |
    // |2  |[2.5,36.0,198.0] |[0.010570824524312896,0.1522198731501057,0.8372093023255814]|
    // |3  |[6.8,24.0,459.0] |[0.013883217639853,0.04899959167006941,0.9371171906900776]  |
    // +---+-----------------+------------------------------------------------------------+
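    // Quick manual check (a minimal sketch; the helper vals here are illustrative):
    // with p = 1.0, Normalizer divides each component by the row's L1 norm, i.e.
    // the sum of absolute values. Row 1: 1.0 + 12.5 + 108.0 = 121.5, and
    // 1.0 / 121.5 ≈ 0.00823, matching the normalfeatures column above.
    val row1 = Vectors.dense(1.0, 12.5, -108.0)
    val row1L1Norm = row1.toArray.map(math.abs).sum
    println(Vectors.dense(row1.toArray.map(_ / row1L1Norm)))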
    // StandardScaler operates on each column, i.e. each feature dimension,
    // standardizing it to unit standard deviation, zero mean, or both.
    val scaler_1 = new StandardScaler()
      .setInputCol("features")
      .setOutputCol("scaledFeatures")
      .setWithStd(true)
      .setWithMean(false)
    val scalerModel_1 = scaler_1.fit(df)
    val scaledData_1 = scalerModel_1.transform(df)
    scaledData_1.show(false)
    // Output:
    // +---+-----------------+------------------------------------------------------------+
    // |id |features         |scaledFeatures                                              |
    // +---+-----------------+------------------------------------------------------------+
    // |1  |[1.0,12.5,-108.0]|[0.3321666477362439,1.0637495315070804,-0.38055308480157485]|
    // |2  |[2.5,36.0,198.0] |[0.8304166193406097,3.063598650740391,0.6976806554695538]   |
    // |3  |[6.8,24.0,459.0] |[2.2587332046064583,2.0423991004935944,1.617350610406693]   |
    // +---+-----------------+------------------------------------------------------------+
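    // Quick manual check (a minimal sketch): with setWithMean(false) only the scaling
    // step applies, and Spark divides by the sample standard deviation (the n - 1
    // formula). For column 1 = (1.0, 2.5, 6.8) the std is about 3.0105, so
    // 1.0 / 3.0105 ≈ 0.33217, matching the first output cell above.
    val col1 = Array(1.0, 2.5, 6.8)
    val mean1 = col1.sum / col1.length
    val std1 = math.sqrt(col1.map(x => math.pow(x - mean1, 2)).sum / (col1.length - 1))
    println(col1.map(_ / std1).mkString(", ")) // ≈ 0.33217, 0.83042, 2.25873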
    // MinMaxScaler likewise operates on each column (each feature dimension),
    // linearly mapping it into a specified range, typically [0, 1].
    val scaler_2 = new MinMaxScaler()
      .setInputCol("features")
      .setOutputCol("scaledFeatures")
    val scalerModel_2 = scaler_2.fit(df)
    val scaledData_2 = scalerModel_2.transform(df)
    scaledData_2.show(false)
    // Output:
    // +---+-----------------+--------------------------------------------+
    // |id |features         |scaledFeatures                              |
    // +---+-----------------+--------------------------------------------+
    // |1  |[1.0,12.5,-108.0]|[0.0,0.0,0.0]                               |
    // |2  |[2.5,36.0,198.0] |[0.25862068965517243,1.0,0.5396825396825397]|
    // |3  |[6.8,24.0,459.0] |[1.0,0.48936170212765956,1.0]               |
    // +---+-----------------+--------------------------------------------+
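    // Quick manual check (a minimal sketch): with the default target range [0, 1],
    // MinMaxScaler maps x to (x - min) / (max - min) per column. Column 1 spans
    // [1.0, 6.8], so 2.5 maps to (2.5 - 1.0) / (6.8 - 1.0) ≈ 0.2586 as in row 2,
    // while each column's minimum maps to 0.0 and its maximum to 1.0.
    println((2.5 - 1.0) / (6.8 - 1.0)) // 0.25862068965517243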
    // MaxAbsScaler rescales each feature dimension to the closed interval [-1, 1]
    // by dividing by that feature's maximum absolute value; it neither shifts the
    // distribution nor destroys the sparsity of the original feature vectors.
    val scaler_3 = new MaxAbsScaler()
      .setInputCol("features")
      .setOutputCol("scaledFeatures")
    val scalerModel_3 = scaler_3.fit(df)
    val scaledData_3 = scalerModel_3.transform(df)
    scaledData_3.show(false)
    // Output:
    // +---+-----------------+-------------------------------------------------------------+
    // |id |features         |scaledFeatures                                               |
    // +---+-----------------+-------------------------------------------------------------+
    // |1  |[1.0,12.5,-108.0]|[0.14705882352941177,0.3472222222222222,-0.23529411764705882]|
    // |2  |[2.5,36.0,198.0] |[0.36764705882352944,1.0,0.43137254901960786]                |
    // |3  |[6.8,24.0,459.0] |[1.0,0.6666666666666666,1.0]                                 |
    // +---+-----------------+-------------------------------------------------------------+
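    // Quick manual check (a minimal sketch): MaxAbsScaler divides each column by its
    // maximum absolute value. Column 3 has max |x| = 459.0, so -108.0 maps to
    // -108.0 / 459.0 ≈ -0.2353 as in row 1; no shift is applied, so zeros stay zero
    // and sparsity is preserved.
    println(-108.0 / 459.0) // -0.23529411764705882

    sparkSession.stop() // release the local Spark context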
  }
}