Hadoop WordCount Program

--aaa.txt
hello world hadoop
hello lhj hadoop
good luck to lhj
nice to me lhj
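
The job has three parts: WcMapper tokenizes each input line and emits a (word, 1) pair per token, WcReduce sums the counts for each word, and JobRun wires the two together and submits the job.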

--Mapper
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;


public class WcMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

	// Reuse the output objects across calls instead of allocating per token.
	private final static IntWritable ONE = new IntWritable(1);
	private Text word = new Text();

	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {
		// The key is the byte offset of the line; the value is the line itself.
		String line = value.toString();
		StringTokenizer st = new StringTokenizer(line);
		while (st.hasMoreTokens()) {
			// Emit (word, 1) for every whitespace-separated token.
			word.set(st.nextToken());
			context.write(word, ONE);
		}
	}

}
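
Note that StringTokenizer splits on whitespace only, so punctuation stays attached ("hadoop." and "hadoop" would count as different words). If that matters for your data, a regex split is a drop-in alternative; a minimal sketch, not part of the original:

		// Hypothetical variant of the loop body: split on runs of whitespace.
		for (String token : line.split("\\s+")) {
			if (!token.isEmpty()) { // a leading space yields an empty first element
				word.set(token);
				context.write(word, ONE);
			}
		}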


--Reducer
import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;


public class WcReduce extends Reducer<Text, IntWritable, Text, IntWritable> {

	// Reused output object, mirroring the mapper.
	private IntWritable result = new IntWritable();

	@Override
	protected void reduce(Text key, Iterable<IntWritable> values, Context context)
			throws IOException, InterruptedException {
		// The framework groups map output by key, so values holds every
		// count emitted for this word; sum them for the final total.
		int sum = 0;
		for (IntWritable i : values) {
			sum += i.get();
		}
		result.set(sum);
		context.write(key, result);
	}

}
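
For the sample input, the reducer runs once per distinct word; for the key hello it receives the values [1, 1] and writes (hello, 2).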



--JobRun

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


public class JobRun {
	public static void main(String[] args) {
		Configuration conf = new Configuration();
//		conf.set("fs.default.name", "hdfs://node1:9000");
//		conf.set("mapred.job.tracker", "node1:9001");
//		conf.set("mapred.jar", "C:\\Documents and Settings\\Administrator\\桌面");

		try {
			Job job = new Job(conf); // deprecated in Hadoop 2+; use Job.getInstance(conf) there
			job.setJarByClass(JobRun.class);
			job.setMapperClass(WcMapper.class);
			job.setReducerClass(WcReduce.class);
			job.setMapOutputKeyClass(Text.class);
			job.setMapOutputValueClass(IntWritable.class);
			// The final (reduce) output types happen to match the map output types here.
			job.setOutputKeyClass(Text.class);
			job.setOutputValueClass(IntWritable.class);

			job.setNumReduceTasks(1); // 1 is also the default
			FileInputFormat.addInputPath(job, new Path("/user/hadoop/input/wc"));
			FileOutputFormat.setOutputPath(job, new Path("/user/hadoop/output/wc"));
			System.exit(job.waitForCompletion(true) ? 0 : 1);

		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}
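
One optional tweak, not in the original: because the reduce function just sums, it is associative and commutative, so the same class can also run as a combiner on the map side to shrink shuffle traffic. Add this alongside the other job setters:

			job.setCombinerClass(WcReduce.class); // pre-aggregates (word, 1) pairs per mapper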



--Upload the input file
(The jar itself stays local; hadoop jar reads it from the local filesystem, so it does not belong in the HDFS input directory, where its bytes would be counted as input.)
hadoop fs -put aaa.txt /user/hadoop/input/wc
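
To confirm the upload (optional check, not in the original):
hadoop fs -ls /user/hadoop/input/wc
hadoop fs -cat /user/hadoop/input/wc/aaa.txt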


--Run on Hadoop
hadoop jar wc.jar JobRun       //wc.jar is the exported jar; if the class lives in a package, use the fully qualified name; JobRun is the class with the main method
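
Note: FileOutputFormat refuses to write into an existing directory, so remove the old output before re-running (-rmr is the Hadoop 1.x form; Hadoop 2+ uses -rm -r):
hadoop fs -rmr /user/hadoop/output/wc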


--Monitor job progress
http://node1:50030/jobtracker.jsp
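
This is the Hadoop 1.x JobTracker web UI; on a YARN (Hadoop 2+) cluster the ResourceManager UI at http://node1:8088 takes its place.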


--View the result
[hadoop@node1 ~]$ hadoop fs -cat /user/hadoop/output/wc/part-r-00000
good    1
hadoop  2
hello   2
lhj     3
luck    1
me      1
nice    1
to      2
world   1
