Spark: Customizing the part-00001 Output File Names

import org.apache.hadoop.mapred.lib.MultipleTextOutputFormat
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by maokm on 2017/7/20.
  */
class RDDMultipleTextOutputFormat extends MultipleTextOutputFormat[Any, Any] {
  // Name each output file after the record's key instead of the default part-NNNNN.
  override def generateFileNameForKeyValue(key: Any, value: Any, name: String): String =
    key.asInstanceOf[String]
}

object test {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("test").setMaster("local[2]")
    val sc = new SparkContext(conf)
    val arr = Array("hadoop", "hadoop", "spark", "hadoop", "hello", "work", "spark")
    val word = sc.parallelize(arr)
    // Word count: split, pair each word with 1, then sum the counts per key.
    val tuples = word.flatMap(_.split(",")).map((_, 1)).reduceByKey(_ + _)
    // Save through the custom output format so each key gets its own file.
    tuples.saveAsHadoopFile("E://out", classOf[String], classOf[Integer], classOf[RDDMultipleTextOutputFormat])
    sc.stop()
  }
}

   The generateFileNameForKeyValue function in the RDDMultipleTextOutputFormat class takes three parameters: key and value are the key and value of each record being written, and name is the default partition file name, i.e. the number of each reducer (part-00000, part-00001, ...). When the same key can appear in the output of more than one task, the name parameter can be folded into the file name to avoid commit collisions, as sketched below.
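   A minimal sketch of that variant (my illustration, not part of the original post; the class name is hypothetical). It writes each key into its own sub-directory, keeps the reducer number in the file name, and also overrides generateActualKey so the key is not repeated inside the file:

import org.apache.hadoop.io.NullWritable
import org.apache.hadoop.mapred.lib.MultipleTextOutputFormat

// Illustrative variant: each key becomes a directory, e.g. spark/part-00000,
// spark/part-00001, so two tasks holding the same key never try to commit
// a file with the same name.
class KeyAsDirOutputFormat extends MultipleTextOutputFormat[Any, Any] {
  override def generateFileNameForKeyValue(key: Any, value: Any, name: String): String =
    key.asInstanceOf[String] + "/" + name

  // Drop the key from the file contents; each output line is then just the value.
  override def generateActualKey(key: Any, value: Any): Any =
    NullWritable.get()
}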

   The resulting output directory layout:
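   The screenshot of the directory listing did not survive extraction; based on the example data, the output directory should contain one file per key instead of the usual part-NNNNN files, roughly:

E:\out\
    _SUCCESS
    hadoop    (contents: hadoop	3)
    hello     (contents: hello	1)
    spark     (contents: spark	2)
    work      (contents: work	1)

   With the default generateActualKey, each file still contains the key followed by a tab and the value.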

