WordCount Examples with Spark 2.x and Java 8

Basic version without lambdas

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Iterator;
public class WordCount implements Serializable {
    public static void main(String[] args) {
        // Input file
        String wordFile = "/user/walker/input/wordcount/idea.txt";
        SparkSession spark = SparkSession.builder()
                .appName("wordcount")
                .config("spark.executor.instances", 10)
                .config("spark.executor.memory", "4g")
                .config("spark.executor.cores", 1)
                .config("spark.hadoop.mapreduce.output.fileoutputformat.compress", false)
                .getOrCreate();
        JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());
        JavaRDD<String> hdfstext = jsc.textFile(wordFile);
        // Split each line into words
        JavaRDD<String> words = hdfstext.flatMap(new FlatMapFunction<String, String>() {
            public Iterator<String> call(String x) {
                return Arrays.asList(x.split(" ")).iterator();
            }
        });
        // Map each word to a count of 1
        JavaPairRDD<String, Integer> pairs = words.mapToPair(new PairFunction<String, String, Integer>() {
            public Tuple2<String, Integer> call(String word) {
                return new Tuple2<>(word, 1);
            }
        });
        // Sum the counts for each word
        JavaPairRDD<String, Integer> wordCounts = pairs.reduceByKey(new Function2<Integer, Integer, Integer>() {
            public Integer call(Integer v1, Integer v2) {
                return v1 + v2;
            }
        }).repartition(1);
        // Output directory
        String outDir = "/user/walker/output/wordcount";
        wordCounts.saveAsTextFile(outDir);
        jsc.close();
    }
}
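
The anonymous inner classes above can also be extracted into named classes, which keeps main() shorter and makes each function easier to reuse and test. A minimal sketch follows; the class names SplitLine, WordToPair, and SumCounts are illustrative and not part of the original code:

import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;
import java.util.Arrays;
import java.util.Iterator;
// Named function classes: a sketch, not from the original article.
class SplitLine implements FlatMapFunction<String, String> {
    @Override
    public Iterator<String> call(String line) {
        return Arrays.asList(line.split(" ")).iterator();
    }
}
class WordToPair implements PairFunction<String, String, Integer> {
    @Override
    public Tuple2<String, Integer> call(String word) {
        return new Tuple2<>(word, 1);
    }
}
class SumCounts implements Function2<Integer, Integer, Integer> {
    @Override
    public Integer call(Integer a, Integer b) {
        return a + b;
    }
}
// Inside main() the transformations would then read:
//   JavaRDD<String> words = hdfstext.flatMap(new SplitLine());
//   JavaPairRDD<String, Integer> pairs = words.mapToPair(new WordToPair());
//   JavaPairRDD<String, Integer> wordCounts = pairs.reduceByKey(new SumCounts());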

Basic version with lambdas

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;
import java.io.Serializable;
import java.util.Arrays;
public class WordCount2 implements Serializable {
    public static void main(String[] args) {
        // Input file
        String wordFile = "/user/walker/input/wordcount/idea.txt";
        SparkSession spark = SparkSession.builder()
                .appName("wordcount")
                .config("spark.executor.instances", 10)
                .config("spark.executor.memory", "4g")
                .config("spark.executor.cores", 1)
                .config("spark.hadoop.mapreduce.output.fileoutputformat.compress", false)
                .getOrCreate();

        JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());
        JavaRDD<String> hdfstext = jsc.textFile(wordFile);
        // Split each line into words
        JavaRDD<String> words = hdfstext.flatMap(line -> Arrays.asList(line.split(" ")).iterator());
        // Map each word to a count of 1
        JavaPairRDD<String, Integer> pairs = words.mapToPair(word -> new Tuple2<>(word, 1));
        // Sum the counts for each word
        JavaPairRDD<String, Integer> wordCounts = pairs.reduceByKey((v1, v2) -> v1 + v2).repartition(1);
        // Output directory
        String outDir = "/user/walker/output/wordcount";
        wordCounts.saveAsTextFile(outDir);
        jsc.close();
    }
}
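
The settings above (executor instances, executor memory, HDFS paths) assume a real cluster. For a quick local test of the same lambda-based logic, a minimal sketch could use a local master and a local file; the local[*] setting, the file path, and the class name below are assumptions for illustration only, not from the original article:

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;
import java.util.Arrays;
public class WordCountLocalTest {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
                .appName("wordcount-local-test")
                .master("local[*]")   // run inside the local JVM instead of on a cluster
                .getOrCreate();
        JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());
        // Hypothetical local input file, for illustration only
        JavaRDD<String> lines = jsc.textFile("file:///tmp/idea.txt");
        JavaPairRDD<String, Integer> counts = lines
                .flatMap(line -> Arrays.asList(line.split(" ")).iterator())
                .mapToPair(word -> new Tuple2<>(word, 1))
                .reduceByKey((a, b) -> a + b);
        // Print to the console instead of writing to HDFS
        counts.collect().forEach(t -> System.out.println(t._1() + "\t" + t._2()));
        jsc.close();
    }
}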

Variant version with lambdas (using countByValue)

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;
import java.io.Serializable;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
public class WordCount3 implements Serializable {
    public static void main(String[] args) {
        // Input file
        String wordFile = "/user/walker/input/wordcount/idea.txt";
        SparkSession spark = SparkSession.builder()
                .appName("wordcount")
                .config("spark.executor.instances", 10)
                .config("spark.executor.memory", "4g")
                .config("spark.executor.cores", 1)
                .config("spark.hadoop.mapreduce.output.fileoutputformat.compress", false)
                .getOrCreate();
        JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());
        JavaRDD<String> hdfstext = jsc.textFile(wordFile);
        // Split each line into words
        JavaRDD<String> words = hdfstext.flatMap(line -> Arrays.asList(line.split(" ")).iterator());
        // Count occurrences of each word
        Map<String, Long> wordCounts = words.countByValue();
        // Convert the Map back to an RDD
        List<Tuple2<String, Long>> lst = new LinkedList<>();
        wordCounts.forEach((k, v) -> lst.add(new Tuple2<>(k, v)));
        JavaPairRDD<String, Long> result = jsc.parallelizePairs(lst).repartition(1);
        // Save the result to HDFS
        String outDir = "/user/walker/output/wordcount";    // Output directory
        result.saveAsTextFile(outDir);
        jsc.close();
    }
}
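
One caveat worth noting: countByValue() is an action, so the whole word-to-count Map is returned to driver memory, and this variant therefore only suits data with a modest number of distinct words. If a driver-side Map is what you want, the scalable reduceByKey approach from the earlier versions can produce the same thing via collectAsMap(). A minimal sketch, reusing the words RDD from WordCount3 above:

// Sketch only: equivalent driver-side result built from the scalable
// reduceByKey pipeline, then collected to the driver in one step.
Map<String, Integer> counts = words
        .mapToPair(word -> new Tuple2<>(word, 1))
        .reduceByKey((a, b) -> a + b)
        .collectAsMap();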
This article is from walker snapshot.