使用spark-shell 通過groupByKey方法將行轉列。
一、需求
在HDFS或本地目錄有一份text格式數據(數據內容爲英文逗號分隔,字段內容爲uid,value,key),現在要求將這些數據按照uid分組,最後保存的格式爲 uid,uid|value1,key1|value2,key2|… 。具體參見下方:
//輸入數據
cat /home/hadoop/data/source.csv
8888880674736793701,1003422322c6c937c63af250637fb07e89012,a_zmxy_bindid
8888880674736793701,82a5d0ff83cdd46f8aed902a02105e9f,id_no
8888880674736793701,098dcdb655a715d3816ee8d4854f5db9,regist_mobile
8888880674736793701,c4de399c388097b9a3909bb94cfd51a1,travel_emerg_mobile
8888880674736793702,82a5d0ff83cdd46f8aed902a02105e93,id_no
8888880674736793702,1003422322c6c937c63af250637fb07e89013,b_zmxy_bindid
8888880674736793702,098dcdb655a715d3816ee8d4854f5db3,regist_mobile
8888880674736793702,c4de399c388097b9a3909bb94cfd51a3,travel_emerg_mobile
8888880674736793704,1003422322c6c937c63af250637fb07e89014,a_zmxy_bindid
8888880674736793704,82a5d0ff83cdd46f8aed902a02105e94,id_no
8888880674736793704,098dcdb655a715d3816ee8d4854f5db4,regist_mobile
8888880674736793704,c4de399c388097b9a3909bb94cfd51a4,travel_emerg_mobile
8888880674736793704,c4de399c388097b9a3909bb94cfd51h4,home_emerg_mobile
8888880674736793965,6fab6c3dd609da82acc4b4a48713bdff,regist_mobile
8888880674736793965,c397e3055a5a60566d4a2670979ee0b4,wx_id
8888880674736793965,ws0gnv-ec:01:ee:38:c4:fa,work_wifimac
8888880674736793965,196985b4d3922813b15fe9790c689b1e15dc6,a_zmxy_bindid
8888880674736793965,f4f954dbe1be7bb14643041099a0ed2d,id_no
8888880674736793965,863264039353592,travel_imei
8888880674736793965,ws0gnv-ec:01:ee:38:c4:fa,rest_wifimac
8888880674736793965,7hfL8qWJQGmlH4H-W2Jgbg,travel_oid
//期待輸出數據
8888880674736793965,uid|6fab6c3dd609da82acc4b4a48713bdff,regist_mobile|c397e3055a5a60566d4a2670979ee0b4,wx_id|ws0gnv-ec:01:ee:38:c4:fa,work_wifimac|196985b4d3922813b15fe9790c689b1e15dc6,a_zmxy_bindid|f4f954dbe1be7bb14643041099a0ed2d,id_no|863264039353592,travel_imei|ws0gnv-ec:01:ee:38:c4:fa,rest_wifimac|7hfL8qWJQGmlH4H-W2Jgbg,travel_oid
8888880674736793702,uid|1003422322c6c937c63af250637fb07e89013,a_zmxy_bindid|82a5d0ff83cdd46f8aed902a02105e93,id_no|1003422322c6c937c63af250637fb07e89013,b_zmxy_bindid|098dcdb655a715d3816ee8d4854f5db3,regist_mobile|c4de399c388097b9a3909bb94cfd51a3,travel_emerg_mobile
8888880674736793704,uid|1003422322c6c937c63af250637fb07e89014,a_zmxy_bindid|82a5d0ff83cdd46f8aed902a02105e94,id_no|1003422322c6c937c63af250637fb07e89014,b_zmxy_bindid|098dcdb655a715d3816ee8d4854f5db4,regist_mobile|c4de399c388097b9a3909bb94cfd51a4,travel_emerg_mobile|c4de399c388097b9a3909bb94cfd51h4,home_emerg_mobile
8888880674736793701,uid|1003422322c6c937c63af250637fb07e89012,a_zmxy_bindid|82a5d0ff83cdd46f8aed902a02105e9f,id_no|1003422322c6c937c63af250637fb07e89012,b_zmxy_bindid|098dcdb655a715d3816ee8d4854f5db9,regist_mobile|c4de399c388097b9a3909bb94cfd51a1,travel_emerg_mobile
二、spark-shell scala 實現
#原始本地數據 /home/hadoop/data/source.csv
$SPARK_HOME/bin/spark-shell
//spark 行轉列
//(uid),(uid,value,key)
scala> val lines = sc.textFile("/home/hadoop/data/source.csv").map(_.split(",")).keyBy(a => a(0)).cache()
scala> lines.collect
#打印的內容省略,後續不再贅述
#將map中的value數組類型變成字符串類型,用英文逗號連接
scala> var rdd1= lines.mapValues(v =>v.mkString(","))
rdd1: org.apache.spark.rdd.RDD[(String, String)] = MapPartitionsRDD[39] at mapValues at <console>:28
scala> rdd1.collect
#groupByKey(uid) 按照key group by
scala> val rdd2 = rdd1.groupByKey();
rdd2: org.apache.spark.rdd.RDD[(String, Iterable[String])] = ShuffledRDD[40] at groupByKey at <console>:30
scala> rdd2.collect
#將分組後的數據改成可識別的形式,打印結果
rdd2.map { /* grouped data: key = uid, value = all "uid,value,key" records for that uid */
  case (k, v) =>
    // For each record, drop the leading uid (everything up to and including the
    // first comma), keeping the "value,key" tail; then join the tails with '|'.
    // map + mkString replaces the original var + foreach + dropRight(1) idiom
    // with the same result (empty group still yields an empty string).
    val joined = v.map(rec => rec.substring(rec.indexOf(",") + 1)).mkString("|")
    s"$k,uid|$joined" /* final line format: uid,uid|value1,key1|value2,key2|... */
}
.foreach(println)
val rdd3 = rdd2.map { /* grouped data: key = uid, value = all "uid,value,key" records for that uid */
  case (k, v) =>
    // Strip the leading "uid," prefix from every record and join the remaining
    // "value,key" tails with '|'. Using map + mkString instead of mutable
    // accumulation (var + foreach + dropRight) — behavior is identical,
    // including the empty-group case (mkString on empty yields "").
    val joined = v.map(rec => rec.substring(rec.indexOf(",") + 1)).mkString("|")
    s"$k,uid|$joined" /* final line format: uid,uid|value1,key1|value2,key2|... */
}
scala> rdd3.collect
#保存結果到本地
scala> rdd3.saveAsTextFile("/home/hadoop/data/adjacent_list.csv")
[hadoop@bigdata-k-01 adjacent_list.csv]$ pwd
/home/hadoop/data/adjacent_list.csv
三、驗證結果
[hadoop@bigdata-k-01 adjacent_list.csv]$ pwd
/home/hadoop/data/adjacent_list.csv
[hadoop@bigdata-k-01 adjacent_list.csv]$ ll
總用量 8
-rw-r--r-- 1 hadoop hadoop 912 3月 23 18:06 part-00000
-rw-r--r-- 1 hadoop hadoop 263 3月 23 18:06 part-00001
-rw-r--r-- 1 hadoop hadoop 0 3月 23 18:06 _SUCCESS
[hadoop@bigdata-k-01 adjacent_list.csv]$ cat *
8888880674736793965,uid|6fab6c3dd609da82acc4b4a48713bdff,regist_mobile|c397e3055a5a60566d4a2670979ee0b4,wx_id|ws0gnv-ec:01:ee:38:c4:fa,work_wifimac|196985b4d3922813b15fe9790c689b1e15dc6,a_zmxy_bindid|f4f954dbe1be7bb14643041099a0ed2d,id_no|863264039353592,travel_imei|ws0gnv-ec:01:ee:38:c4:fa,rest_wifimac|7hfL8qWJQGmlH4H-W2Jgbg,travel_oid
8888880674736793702,uid|1003422322c6c937c63af250637fb07e89013,a_zmxy_bindid|82a5d0ff83cdd46f8aed902a02105e93,id_no|1003422322c6c937c63af250637fb07e89013,b_zmxy_bindid|098dcdb655a715d3816ee8d4854f5db3,regist_mobile|c4de399c388097b9a3909bb94cfd51a3,travel_emerg_mobile
8888880674736793704,uid|1003422322c6c937c63af250637fb07e89014,a_zmxy_bindid|82a5d0ff83cdd46f8aed902a02105e94,id_no|1003422322c6c937c63af250637fb07e89014,b_zmxy_bindid|098dcdb655a715d3816ee8d4854f5db4,regist_mobile|c4de399c388097b9a3909bb94cfd51a4,travel_emerg_mobile|c4de399c388097b9a3909bb94cfd51h4,home_emerg_mobile
8888880674736793701,uid|1003422322c6c937c63af250637fb07e89012,a_zmxy_bindid|82a5d0ff83cdd46f8aed902a02105e9f,id_no|1003422322c6c937c63af250637fb07e89012,b_zmxy_bindid|098dcdb655a715d3816ee8d4854f5db9,regist_mobile|c4de399c388097b9a3909bb94cfd51a1,travel_emerg_mobile