A beginner's Hadoop demo: WordCount word counting (IntelliJ IDEA edition)

Before the code

Most posts found online say you have to package the project as a jar, or add entries to your environment variables, which is fairly cumbersome. The code in this post, however, runs directly from the IDE and prints its results normally. The key is the line below, which prints the job's output to the console and exits once the run completes, making the result easier to inspect.

System.exit(job.waitForCompletion(true) ? 0 : 1);
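
Running the job straight from IDEA like this assumes the Hadoop client libraries for the matching version (2.9.1 here) are already on the project classpath, for example via a Maven dependency; on Windows, the directory that hadoop.home.dir points to in the driver below also needs to contain winutils.exe, otherwise local file-system operations fail.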

The Mapper class

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.Arrays;

public class MapperTest extends Mapper<LongWritable, Text, Text, IntWritable> {
    /**
     * @param key     the byte offset of the current line in the input file (not a line number)
     * @param value   one line of the text to be counted
     * @param context
     * @throws IOException
     * @throws InterruptedException
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // each call to map receives one line of the input file as the value
        String text = value.toString();
        String[] words = text.split(" ");
        // emit every word with a count of 1
        Arrays.stream(words).forEach(
                word -> {
                    try {
                        context.write(new Text(word), new IntWritable(1));
                    } catch (IOException | InterruptedException e) {
                        e.printStackTrace();
                    }
                }
        );
    }
}
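
The mapper above allocates a new Text and a new IntWritable for every word, which works but creates unnecessary garbage on large inputs. Below is a minimal sketch of the object-reuse pattern used by the official Hadoop WordCount example, written against the same class; the field names ONE and word are only illustrative. Because the loop lives directly in map, the checked exceptions can be declared on the method and the try/catch inside the lambda is no longer needed.

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class MapperTest extends Mapper<LongWritable, Text, Text, IntWritable> {
    // reuse the same Writable instances across calls instead of allocating per word
    private static final IntWritable ONE = new IntWritable(1);
    private final Text word = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        for (String token : value.toString().split(" ")) {
            word.set(token);
            context.write(word, ONE);
        }
    }
}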

The Reducer class

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class ReducerTest extends Reducer<Text, IntWritable, Text, IntWritable> {
    /**
     * @param key     a word
     * @param values  all the counts emitted for that word
     * @param context
     */
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // sum up the counts for this word
        int sum = 0;
        for (IntWritable count : values) {
            sum += count.get();
        }
        // emit the word with its total frequency once, after the sum is complete
        context.write(key, new IntWritable(sum));
    }
}
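
Since this reducer only adds up counts, an operation that can be applied to partial groups in any order, it can optionally also act as a combiner so that counts are pre-aggregated on the map side before the shuffle. If you want that, one extra line in the driver below is enough (a sketch, assuming the Runner shown next):

// optional: reuse the reducer as a combiner to pre-aggregate counts on the map side
job.setCombinerClass(ReducerTest.class);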

The driver class (main method)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class Runner {
    private static final String inpath = "D:\\codeWareCollections\\hadoop-2.9.1\\input\\README.txt";

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        try {
            System.setProperty("hadoop.home.dir", "D:\\codeWareCollections\\hadoop-2.9.1");
            Job job = Job.getInstance(conf, "first Hadoop");
            job.setJarByClass(Runner.class);
            job.setMapperClass(MapperTest.class);
            job.setReducerClass(ReducerTest.class);

            // output key type
            job.setOutputKeyClass(Text.class);
            // output value type
            job.setOutputValueClass(IntWritable.class);
            // set the input and output paths
            Path inputPath = new Path(inpath);
            FileInputFormat.addInputPath(job, inputPath);
            FileOutputFormat.setOutputPath(job, new Path("D:\\output"));
            System.exit(job.waitForCompletion(true) ? 0 : 1);

        } catch (IOException | InterruptedException | ClassNotFoundException e) {
            e.printStackTrace();
        }
    }
}
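
Note that FileOutputFormat requires the output directory not to exist yet, so a second run against D:\output fails until that directory is removed. A minimal sketch of cleaning it up inside main, before waitForCompletion, using the same conf and output path as above (the recursive delete is only for convenience in a local demo):

import org.apache.hadoop.fs.FileSystem;

// remove the previous run's output directory, if any, before submitting the job
Path outputPath = new Path("D:\\output");
FileSystem fs = FileSystem.get(conf);
if (fs.exists(outputPath)) {
    fs.delete(outputPath, true); // true = delete recursively
}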