Spark Streaming 整合 Kafka 完成網站點擊流實時統計

（原文此處為整體架構示意圖，圖片未能載入）
1.安裝並配置zk

2.安裝並配置Kafka

3.啓動zk

4.啓動Kafka

5.創建topic

# Create the "urlcount" topic with 3 partitions and 3 replicas
# (old ZooKeeper-based syntax; newer Kafka versions use --bootstrap-server).
bin/kafka-topics.sh --create --zookeeper node1.itcast.cn:2181,node2.itcast.cn:2181 \
--replication-factor 3 --partitions 3 --topic urlcount

6.編寫Spark Streaming應用程序

package cn.itcast.spark.streaming

import org.apache.spark.{HashPartitioner, SparkConf}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Real-time website click-stream counter: consumes access-log lines from
 * Kafka and maintains a running hit count per URL via updateStateByKey.
 *
 * Usage: UrlCount <zkQuorum> <groupId> <topics> <numThreads> <checkpointDir>
 *   zkQuorum      comma-separated ZooKeeper quorum (host:port,...)
 *   groupId       Kafka consumer group id
 *   topics        comma-separated topic names
 *   numThreads    receiver threads per topic
 *   checkpointDir HDFS path for streaming checkpoints
 */
object UrlCount {

  /**
   * State-update function for updateStateByKey: for each key, add the counts
   * seen in the current batch (y.sum) to the previously accumulated total
   * (z, which is None the first time a key appears).
   */
  val updateFunc = (iterator: Iterator[(String, Seq[Int], Option[Int])]) => {
    iterator.flatMap { case (x, y, z) => Some(y.sum + z.getOrElse(0)).map(n => (x, n)) }
  }

  def main(args: Array[String]): Unit = {
    // Validate argument count before destructuring, so a wrong invocation
    // produces a usage message instead of an opaque MatchError.
    if (args.length != 5) {
      System.err.println(
        "Usage: UrlCount <zkQuorum> <groupId> <topics> <numThreads> <checkpointDir>")
      sys.exit(1)
    }
    val Array(zkQuorum, groupId, topics, numThreads, hdfs) = args

    // Create the SparkConf and set the application name.
    val conf = new SparkConf().setAppName("UrlCount")

    // StreamingContext with 2-second micro-batches.
    val ssc = new StreamingContext(conf, Seconds(2))

    // Checkpointing is mandatory for updateStateByKey (persists running state).
    ssc.checkpoint(hdfs)

    // Map each topic to the number of receiver threads that should consume it.
    val topicMap = topics.split(",").map((_, numThreads.toInt)).toMap

    // Pull data from Kafka (receiver-based API); keep only the message value,
    // discarding the key.
    val lines = KafkaUtils.createStream(ssc, zkQuorum, groupId, topicMap,
      StorageLevel.MEMORY_AND_DISK).map(_._2)

    // Extract the URL (7th space-separated field) clicked by the user.
    // `.lift(6)` returns None for malformed lines with fewer than 7 fields,
    // so they are skipped instead of killing the job with
    // ArrayIndexOutOfBoundsException.
    val urls = lines.flatMap(line => line.split(" ").lift(6)).map((_, 1))

    // Running total of clicks per URL across all batches.
    val result = urls.updateStateByKey(updateFunc,
      new HashPartitioner(ssc.sparkContext.defaultParallelism), true)

    // Print the results to the console.
    result.print()
    ssc.start()
    ssc.awaitTermination()
  }
}
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章