Two kinds of wordCount in Spark Streaming

import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

object HelloWord01 {

  def main(args: Array[String]): Unit = {
    // Basic configuration
    val conf = new SparkConf().setMaster("local[*]").setAppName("HelloWord01")
    val ssc = new StreamingContext(conf, Seconds(3))
    ssc.sparkContext.setLogLevel("WARN")
    // Read data from Kafka
    val zk = "127.0.0.1:2181"
    val group = "sunpls"
    val topic = "wuhanPolice"
    val numThread = 2
    val topicMap = topic.split(",").map((_, numThread)).toMap
    val dataDStream = KafkaUtils.createStream(ssc, zk, group, topicMap, StorageLevel.MEMORY_AND_DISK_SER)
    // Split each message value into words and map each word to (word, 1)
    val word_1 = dataDStream.flatMap {
      case (_, value) => value.split(" ")
    }.map((_, 1))
    // Count the words within the current batch only; no state is carried between batches
    word_1.reduceByKey(_ + _).print()
    ssc.start()
    ssc.awaitTermination()

  }

}
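This first version is stateless: every 3-second batch is counted on its own, so a word's count starts from zero again in the next batch. A minimal pure-Scala sketch of what two consecutive batches would print (the batch contents are made-up sample data, not from the original post):

object StatelessBatchesDemo {
  // Same word-count transformation as HelloWord01, applied to one batch of lines
  def countBatch(lines: Seq[String]): Map[String, Int] =
    lines.flatMap(_.split(" ")).groupBy(identity).map { case (w, ws) => (w, ws.size) }

  def main(args: Array[String]): Unit = {
    val batch1 = Seq("hello spark", "hello kafka") // hypothetical first 3-second batch
    val batch2 = Seq("hello streaming")            // hypothetical second batch
    println(countBatch(batch1)) // hello -> 2, spark -> 1, kafka -> 1
    println(countBatch(batch2)) // hello -> 1, streaming -> 1: the earlier count of 2 is not carried over
  }
}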

The stateful version: the counts from every batch are accumulated, giving a running total from the start of the stream.

import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

object HelloWord02 {

  def main(args: Array[String]): Unit = {
    // Basic configuration
    val conf = new SparkConf().setMaster("local[*]").setAppName("HelloWord02")
    val ssc = new StreamingContext(conf, Seconds(3))
    ssc.sparkContext.setLogLevel("WARN")
    // Because state is kept across batches, a checkpoint directory (usually on disk) is required
    ssc.checkpoint("cp")
    // Read data from Kafka
    val zk = "127.0.0.1:2181"
    val group = "sunpls2"
    val topic = "wuhanPolice"
    val numThread = 2
    val topicMap = topic.split(",").map((_, numThread)).toMap
    val dataDStream = KafkaUtils.createStream(ssc, zk, group, topicMap, StorageLevel.MEMORY_AND_DISK_SER)
    val word_1: DStream[(String, Int)] = dataDStream.flatMap {
      case (_, value) => value.split(" ")
    }.map((_, 1))
    // Accumulate the results from the start of the stream:
    // seq holds this batch's new values for a key, buffer holds the previously saved total
    word_1.updateStateByKey((seq: Seq[Int], buffer: Option[Int]) => {
      val sum = buffer.getOrElse(0) + seq.sum
      Option(sum)
    }).print()
    ssc.start()
    ssc.awaitTermination()

  }
}
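The update function passed to updateStateByKey receives, for each key, the new values from the current batch (seq) and the previously stored total (buffer), and returns the new total. A minimal sketch of that accumulation on plain Scala values (the batch contents are hypothetical):

object UpdateStateDemo {
  // The same update function as above, written out for a single key
  def update(seq: Seq[Int], buffer: Option[Int]): Option[Int] =
    Option(buffer.getOrElse(0) + seq.sum)

  def main(args: Array[String]): Unit = {
    // Batch 1: the key "hello" appears twice; there is no previous state yet
    val afterBatch1 = update(Seq(1, 1), None)      // Some(2)
    // Batch 2: "hello" appears once more; the previous total Some(2) is passed in
    val afterBatch2 = update(Seq(1), afterBatch1)  // Some(3)
    println(s"$afterBatch1 $afterBatch2")
  }
}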