Hadoop: modifying WordCount to count the total number of distinct words

About WordCount

For background I recommend this article, it explains WordCount very clearly -> link

The idea (if anything below is unclear, read the linked article first)

1. The original code reads the input line by line, merges identical words, and outputs each word's count in order. Since we don't know which word will be the last one, we can append a marker to every line to signal that reading is finished; the program can then wrap up once it reaches the marker.

For example, we use "完" ("end") as the marker and give it the value -1 (so a negative sum signals the end):

while (token.hasMoreTokens()) {
    word.set(token.nextToken());
    context.write(word, one);
}
Text w = new Text("完");
int last = -1;
context.write(w, new IntWritable(last));

After this change, the mapper emits an extra "完" key for every line, and its value is negative.
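For instance, given a hypothetical one-line input "hello world hello" (my own example, not from the original post), the mapper would now emit:

(hello, 1)
(world, 1)
(hello, 1)
(完, -1)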

2. Next we can add a global (static) variable to hold the total count. A static field works here because the job runs with the default single reduce task, so every reduce() call sees the same sum2; with several reducers each task would keep its own copy.

public class wordcount {  
    public static int sum2=0;

3. Since we are counting the total number of distinct words, any word that occurs one or more times counts as one. The reducer keeps counting values until it hits the negative marker and then writes out the totals. Because "完" is emitted once per line with value -1, summing those -1s also tells us how many lines there were. (The marker works as a terminator because, in Hadoop's byte-wise Text ordering, "完" sorts after ordinary ASCII words, so it is the last key the reducer sees.)

int sum = 0;
for (IntWritable val : values) {
    sum += val.get();
    if (val.get() < 0) {
        // reached the negative "完" marker: -sum is the line count, sum2 the distinct-word count
        Text z = new Text("總數:");
        context.write(new Text("行數:"), new IntWritable(-sum));
        context.write(z, new IntWritable(sum2));
    }
    sum2++;
}
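With the reducer changed like this (and the original per-word write left commented out), the only job output is the two summary lines. For a hypothetical two-line input containing three distinct words, and assuming a single map task so that the combiner collapses each word into one value, the output file would look roughly like:

行數:	2
總數:	3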

Full code

import java.io.IOException;  
import java.util.StringTokenizer;  
import org.apache.hadoop.conf.Configuration;  
import org.apache.hadoop.fs.Path;  
import org.apache.hadoop.io.IntWritable;  
import org.apache.hadoop.io.LongWritable;  
import org.apache.hadoop.io.Text;  
import org.apache.hadoop.mapreduce.Job;  
import org.apache.hadoop.mapreduce.Mapper;  
import org.apache.hadoop.mapreduce.Reducer;  
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;  
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;  
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;  
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;  
import org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer;
import org.apache.hadoop.util.GenericOptionsParser;
   
public class wordcount {  
    public static int sum2=0;
    // Custom mapper, extends org.apache.hadoop.mapreduce.Mapper
    public static class WordCountMap extends Mapper<LongWritable, Text, Text, IntWritable> {  
   
        private final IntWritable one = new IntWritable(1);  
        private Text word = new Text();  
         
        //  the Context here is Mapper<LongWritable, Text, Text, IntWritable>.Context
        public void map(LongWritable key, Text value, Context context)   throws IOException, InterruptedException {  
            String line = value.toString();  
            System.out.println(line);
            // An earlier version used split("\t"), which only splits on tab characters; my text file
            // was space-separated, which is why the results were far off. StringTokenizer below
            // splits on any whitespace, so each token is one word.
            StringTokenizer token = new StringTokenizer(line);  
            while (token.hasMoreTokens()) {  
                word.set(token.nextToken());  
                context.write(word, one);  
            } 
            Text w = new Text("完");
            int last = -1;
            context.write(w, new IntWritable(last));
        }  
    }  
   
    // Custom reducer, extends org.apache.hadoop.mapreduce.Reducer
    public static class WordCountReduce extends Reducer<Text, IntWritable, Text, IntWritable> {  
   
        // the Context here is Reducer<Text, IntWritable, Text, IntWritable>.Context
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {  
            System.out.println(key);
            System.out.println(values);
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
                if (val.get() < 0) {
                    // reached the negative "完" marker:
                    // -sum is the number of lines, sum2 the number of distinct words
                    Text z = new Text("總數:");
                    context.write(new Text("行數:"), new IntWritable(-sum));
                    context.write(z, new IntWritable(sum2));
                }
                sum2++;
            }  
            //context.write(key, new IntWritable(sum));  
        }  
    }  
   
    //  Driver (client) code: submit the job and let the ResourceManager run it
    public static void main(String[] args) throws Exception {  
        Configuration conf = new Configuration();  
        Job job = Job.getInstance(conf, "word count");
         
        //  needed so the job can be run from a packaged jar
        job.setJarByClass(wordcount.class);     
         
        //  where is the input data?
        FileInputFormat.addInputPath(job, new Path(args[0])); 
         
        //  which mapper processes the input?
        job.setMapperClass(WordCountMap.class); 
        //  what are the map output key/value types?
        //job.setMapOutputKeyClass(Text.class);
        //job.setMapOutputValueClass(LongWritable.class);
         
        job.setCombinerClass(IntSumReducer.class);
         
        //  which reducer processes the map output?
        job.setReducerClass(WordCountReduce.class); 
         
        //  what are the reduce output key/value types?
        job.setOutputKeyClass(Text.class);  
        job.setOutputValueClass(IntWritable.class);  
    
   
//        job.setInputFormatClass(TextInputFormat.class);  
//        job.setOutputFormatClass(TextOutputFormat.class);  
    
        //  where should the output be written?
        FileOutputFormat.setOutputPath(job, new Path(args[1]));  
   
        //  hand the job to YARN and wait until it finishes before exiting
        job.waitForCompletion(true);  
         
        /*
        String[] otherArgs = new GenericOptionsParser(conf,args).getRemainingArgs();
        if(otherArgs.length<2){
            System.out.println("Usage:wordcount <in> [<in>...] <out>");
            System.exit(2);
        }
        for(int i=0;i<otherArgs.length-1;i++){
            FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
        }
        System.exit(job.waitForCompletion(true)?0:1);
        */
    }  
} 
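For comparison, here is a minimal alternative sketch (my own, not from the original post; the class and field names are made up): the same distinct-word total can be produced without the "完" marker by overriding Reducer#cleanup(), which Hadoop calls once after the last key has been processed. It reuses the imports from the code above.

public static class DistinctCountReduce extends Reducer<Text, IntWritable, Text, IntWritable> {
    private int distinctWords = 0;  // each reduce() call handles exactly one distinct word

    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        distinctWords++;  // the per-word sum is not needed for a distinct count
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // runs once after all keys have been processed; emit the total number of distinct words
        context.write(new Text("總數:"), new IntWritable(distinctWords));
    }
}

Swapping it in with job.setReducerClass(DistinctCountReduce.class) would give the same 總數 line; if the mapper still emits the "完" marker, it would be counted as one extra word, so either subtract one or drop the marker from the mapper.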

That's my approach. If you have a different idea, feel free to leave a comment.
