Below is today's project example and homework (write a cogroup example in Scala):
package com.dt.spark.cores
import org.apache.spark.{SparkContext, SparkConf}
/**
* Created by chenjh on 2016/1/22.
*/
object Transformations {

  // Every operation called from main is wrapped in its own function, so each module can be reused independently.
  def main(args: Array[String]) {
    val sc = sparkContext("Transformation Operations")
    // mapTransformation(sc)
    // filterTransformation(sc)
    // flatMapTransformation(sc)
    // groupByKeyTransformation(sc)
    // reduceByKeyTransformation(sc)
    // joinTransformation(sc)
    // Homework
    cogroupTransformation(sc)
    // Stop the SparkContext, destroy the Driver-side objects, and release the resources.
    sc.stop()
  }

  def sparkContext(name: String) = {
    // Create the SparkConf and initialize the application's configuration.
    val conf = new SparkConf().setAppName(name).setMaster("local")
    // Create the SparkContext: the sole entry point for creating the first RDD,
    // the soul of the Driver, and the only channel to the cluster.
    val sc = new SparkContext(conf)
    sc
  }
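  // Note: setMaster("local") runs Spark in a single local thread. A minimal variation, assuming
  // one worker thread per available core is desired, is the "local[*]" master URL:
  //   val conf = new SparkConf().setAppName(name).setMaster("local[*]")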
  def mapTransformation(sc: SparkContext) {
    val nums = sc.parallelize(1 to 10)
    // map works on collections of any element type: it traverses every element of the collection
    // it acts on and applies the function passed as its argument to each element.
    val mapped = nums.map(item => 2 * item)
    // Collect the results and print them with foreach.
    mapped.collect().foreach(println)
  }
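  // For reference: in local mode the call above prints the doubled values 2, 4, 6, ..., 20,
  // one per line, since parallelize(1 to 10) preserves element order here.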
  def filterTransformation(sc: SparkContext): Unit = {
    // Create an RDD from a collection.
    val nums = sc.parallelize(1 to 20)
    // The Boolean-valued function passed to filter selects the matching elements,
    // and a new MapPartitionsRDD is built from them.
    val filtered = nums.filter(item => item % 2 == 0)
    // Collect the results and print them with foreach.
    filtered.collect().foreach(println)
  }
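  // For reference: filter keeps only the elements for which the predicate is true, so this prints
  // the even numbers 2, 4, ..., 20: the same output as mapTransformation, reached by selecting
  // elements rather than by doubling them.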
  def flatMapTransformation(sc: SparkContext) {
    // Instantiate an Array of Strings.
    val bigData = Array("Scala Spark", "Java Hadoop", "Java Tachyon")
    // Create a ParallelCollectionRDD with String elements.
    val bigDataString = sc.parallelize(bigData)
    // The function passed as the argument first splits each string of the RDD into words;
    // the per-line results are then flattened into one collection:
    // {Scala, Spark, Java, Hadoop, Java, Tachyon}
    val words = bigDataString.flatMap(line => line.split(" "))
    words.collect().foreach(println)
  }
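  // For contrast, a minimal sketch (a hypothetical helper, not part of the original homework)
  // showing that map with the same split function yields an RDD of arrays, while flatMap
  // flattens the per-line results into individual words:
  def mapVersusFlatMapSketch(sc: SparkContext) {
    val lines = sc.parallelize(Array("Scala Spark", "Java Hadoop"))
    val nested = lines.map(line => line.split(" "))   // RDD[Array[String]] with 2 elements
    val flat = lines.flatMap(line => line.split(" ")) // RDD[String] with 4 elements
    println(nested.count() + " vs " + flat.count())   // prints "2 vs 4"
  }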
  def groupByKeyTransformation(sc: SparkContext) {
    // Prepare the data.
    val data = Array(
      Tuple2(100, "Spark"),
      Tuple2(100, "Tachyon"),
      Tuple2(70, "Hadoop"),
      Tuple2(80, "Kafka"),
      Tuple2(70, "HBase"))
    // Create the RDD.
    val dataRdd = sc.parallelize(data)
    // Group the values that share the same key; each group's values form a collection.
    val grouped = dataRdd.groupByKey()
    // Collect the results and print them with foreach.
    grouped.collect().foreach(println)
  }
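  // For reference, grouped is an RDD[(Int, Iterable[String])]; in local mode the printed output
  // looks like the following (key order may vary with partitioning):
  //   (100,CompactBuffer(Spark, Tachyon))
  //   (70,CompactBuffer(Hadoop, HBase))
  //   (80,CompactBuffer(Kafka))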
  def reduceByKeyTransformation(sc: SparkContext) {
    val lines = sc.textFile("C://SharedFolder//TextLines.txt", 1) // parallelism is 1
    val words = lines.flatMap(line => line.split(" "))
    val pairs = words.map(word => (word, 1))
    val wordCounts = pairs.reduceByKey(_ + _)
    wordCounts.collect.foreach(wordNumberPair => println(wordNumberPair._1 + " : " + wordNumberPair._2))
  }
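  // The word-count example above depends on a local file (C://SharedFolder//TextLines.txt).
  // A self-contained sketch, assuming in-memory input is acceptable; note that reduceByKey also
  // combines values map-side before the shuffle, which groupByKey does not:
  def reduceByKeyInMemorySketch(sc: SparkContext) {
    val lines = sc.parallelize(Seq("Spark Scala Spark", "Java Spark"))
    val wordCounts = lines.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _)
    wordCounts.collect.foreach(println) // e.g. (Spark,3), (Scala,1), (Java,1)
  }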
  def joinTransformation(sc: SparkContext) {
    val studentNames = Array(
      Tuple2(1, "Spark"),
      Tuple2(2, "Tachyon"),
      Tuple2(3, "Hadoop")
    )
    val studentScores = Array(
      Tuple2(1, 100),
      Tuple2(2, 95),
      Tuple2(3, 70)
    )
    val names = sc.parallelize(studentNames)
    val scores = sc.parallelize(studentScores)
    val studentNameAndScore = names.join(scores)
    studentNameAndScore.collect.foreach(println)
  }
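  // For reference, studentNameAndScore is an RDD[(Int, (String, Int))]; in local mode the printed
  // output looks like the following (key order may vary):
  //   (1,(Spark,100))
  //   (2,(Tachyon,95))
  //   (3,(Hadoop,70))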
  def cogroupTransformation(sc: SparkContext): Unit = {
    val studentNames = Array(
      Tuple2(1, "Spark"),
      Tuple2(2, "Tachyon"),
      Tuple2(3, "Hadoop")
    )
    val studentScores = Array(
      Tuple2(1, 100),
      Tuple2(2, 95),
      Tuple2(3, 70),
      Tuple2(1, 80),
      Tuple2(3, 89)
    )
    val names = sc.parallelize(studentNames)
    val scores = sc.parallelize(studentScores)
    val studentNameAndScore = names.cogroup(scores)
    // Print the output.
    studentNameAndScore.collect.foreach(println)
  }
}
Printed result:
(1,(CompactBuffer(Spark),CompactBuffer(100, 80)))
(3,(CompactBuffer(Hadoop),CompactBuffer(70, 89)))
(2,(CompactBuffer(Tachyon),CompactBuffer(95)))
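For comparison, join on the same two RDDs would not group the scores: it emits one output tuple per matching (name, score) combination, while cogroup emits exactly one entry per key carrying both value collections. A minimal sketch (the method name joinVersusCogroupSketch is only illustrative; it assumes the same object as above):

def joinVersusCogroupSketch(sc: SparkContext): Unit = {
  val names = sc.parallelize(Seq((1, "Spark"), (3, "Hadoop")))
  val scores = sc.parallelize(Seq((1, 100), (1, 80), (3, 70)))
  // join: one tuple per matching combination,
  // e.g. (1,(Spark,100)), (1,(Spark,80)), (3,(Hadoop,70))
  names.join(scores).collect.foreach(println)
  // cogroup: one entry per key,
  // e.g. (1,(CompactBuffer(Spark),CompactBuffer(100, 80))), (3,(CompactBuffer(Hadoop),CompactBuffer(70)))
  names.cogroup(scores).collect.foreach(println)
}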