// MapReduce word-count program
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

    /**
     * TokenizerMapper extends Mapper<LongWritable, Text, Text, IntWritable>.
     *
     * [One map task runs per input split: one file yields one map, two files yield two.]
     * map: reads the input content, splits it on " \t\n\r\f", and emits a
     * word ==> one key/value pair for each token.
     *
     * @param LongWritable input key type
     * @param Text         input value type
     * @param Text         output key type
     * @param IntWritable  output value type
     *
     * Writable tells the Hadoop framework how to serialize and deserialize objects
     * of a given type. WritableComparable adds the compareTo interface on top of
     * Writable, so the framework also knows how to sort such objects.
     *
     * @author liuqingjie
     */
    public static class TokenizerMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    /**
     * IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable>.
     *
     * [No matter how many maps run, this single reduce stage aggregates them all.]
     * reduce: loops over all values mapped to a key and sums the word ==> one pairs.
     *
     * The key here is the word set by the Mapper [reduce is invoked once per key].
     * When the loop finishes, the context holds the final result.
     *
     * @author liuqingjie
     */
    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        if (args.length != 2) {
            System.err.println("Usage: wordcount <input path> <output path>");
            System.exit(2);
        }
        Job job = new Job(conf, "wordcount");
        job.setJarByClass(WordCount.class);                       // main class
        job.setMapperClass(TokenizerMapper.class);                // mapper
        job.setReducerClass(IntSumReducer.class);                 // reducer
        job.setMapOutputKeyClass(Text.class);                     // map output key class
        job.setMapOutputValueClass(IntWritable.class);            // map output value class
        job.setOutputKeyClass(Text.class);                        // job output key class
        job.setOutputValueClass(IntWritable.class);               // job output value class
        FileInputFormat.addInputPath(job, new Path(args[0]));     // input path
        FileOutputFormat.setOutputPath(job, new Path(args[1]));   // output path
        System.exit(job.waitForCompletion(true) ? 0 : 1);         // wait for completion, then exit
    }
}
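One common refinement that is not in the listing above: because IntSumReducer's addition is associative and commutative, the same class can also serve as a combiner, pre-aggregating map output locally and cutting shuffle traffic. A minimal sketch, one extra line in main() right after job.setReducerClass(...):

    // Optional local pre-aggregation (an addition, not part of the original listing):
    // reusing the reducer as a combiner is safe here because integer summation is
    // associative and commutative, so partial sums on the map side do not change
    // the final counts.
    job.setCombinerClass(IntSumReducer.class);

For example, given an input file containing "hello world hello", the job writes "hello 2" and "world 1" (tab-separated, via the default TextOutputFormat) into the output directory, which must not already exist when the job starts.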