The WordCount source code is as follows:
package org.apache.hadoop.examples;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCount {

  // Emits (word, 1) for every token in the input line.
  public static class TokenizerMapper
      extends Mapper<Object, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(Object key, Text value, Context context
                    ) throws IOException, InterruptedException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        context.write(word, one);
      }
    }
  }

  // Sums the counts for each word; also reused as the combiner.
  public static class IntSumReducer
      extends Reducer<Text, IntWritable, Text, IntWritable> {

    private IntWritable result = new IntWritable();

    public void reduce(Text key, Iterable<IntWritable> values,
                       Context context
                       ) throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable val : values) {
        sum += val.get();
      }
      result.set(sum);
      context.write(key, result);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Point the client at the remote JobTracker.
    conf.set("mapred.job.tracker", "192.168.80.100:9001");
    // Input/output directories are hardcoded so the job can be launched
    // from Eclipse without program arguments.
    String[] ars = new String[]{"in", "newout"};
    String[] otherArgs = new GenericOptionsParser(conf, ars).getRemainingArgs();
    if (otherArgs.length != 2) {
      System.err.println("Usage: wordcount <in> <out>");
      System.exit(2);
    }
    Job job = new Job(conf, "wordcount");
    job.setJarByClass(WordCount.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
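As an aside: when this class is launched straight from Eclipse (Run on Hadoop) rather than from a packaged jar, setJarByClass has no jar to find, which is what produces the "No job jar file set" warning in the log below (the warning itself points at JobConf#setJar). A minimal sketch of working around it, assuming the project has been exported to a jar; the jar path here is a hypothetical example, not from the original setup:

import org.apache.hadoop.conf.Configuration;

public class IdeSubmitConf {
    // Builds a Configuration for submitting a job from the IDE.
    public static Configuration clusterConf() {
        Configuration conf = new Configuration();
        // Same JobTracker address as in main() above.
        conf.set("mapred.job.tracker", "192.168.80.100:9001");
        // JobConf#setJar stores the jar path under the "mapred.jar" key;
        // setting it directly tells the cluster where to load
        // TokenizerMapper/IntSumReducer from when no enclosing jar exists.
        conf.set("mapred.jar", "E:/hadoop/eclipse/workspace/WordCount/wordcount.jar"); // hypothetical path
        return conf;
    }
}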
Running it with Run on Hadoop produced the following error:
13/12/22 18:49:11 WARN mapred.JobClient: No job jar file set. User classes may not be found. See JobConf(Class) or JobConf#setJar(String).
13/12/22 18:49:11 INFO mapred.JobClient: Cleaning up the staging area hdfs://hadoop001:9000/usr/local/hadoop/tmp/mapred/staging/root/.staging/job_201312221708_0002
13/12/22 18:49:11 ERROR security.UserGroupInformation: PriviledgedActionException as:root cause:org.apache.hadoop.mapreduce.lib.input.InvalidInputException: Input path does not exist: file:/E:/hadoop/eclipse/workspace/WordCount/in
Exception in thread "main" org.apache.hadoop.mapreduce.lib.input.InvalidInputException: Input path does not exist: file:/E:/hadoop/eclipse/workspace/WordCount/in
at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.listStatus(FileInputFormat.java:235)
at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.getSplits(FileInputFormat.java:252)
at org.apache.hadoop.mapred.JobClient.writeNewSplits(JobClient.java:1024)
at org.apache.hadoop.mapred.JobClient.writeSplits(JobClient.java:1041)
at org.apache.hadoop.mapred.JobClient.access$700(JobClient.java:179)
at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:959)
at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:912)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Unknown Source)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1149)
at org.apache.hadoop.mapred.JobClient.submitJobInternal(JobClient.java:912)
at org.apache.hadoop.mapreduce.Job.submit(Job.java:500)
at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:530)
at org.apache.hadoop.examples.WordCount.main(WordCount.java:69)
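One detail worth noticing in the stack trace: the input path resolves to file:/E:/..., i.e. against the local Windows filesystem rather than HDFS. In Hadoop 1.x this is what happens when fs.default.name is not set on the client side, so relative paths like "in" never reach the cluster. A minimal sketch of pinning the default filesystem; the hdfs://hadoop001:9000 URI is taken from the staging path in the log above, so adjust it for your cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsPathCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Without fs.default.name, relative paths like "in" resolve against
        // the local filesystem and produce file:/E:/... URIs.
        conf.set("fs.default.name", "hdfs://hadoop001:9000"); // from the staging path above
        FileSystem fs = FileSystem.get(conf);
        // Prints a fully qualified HDFS URI, e.g. hdfs://hadoop001:9000/user/root/in
        System.out.println(fs.makeQualified(new Path("in")));
    }
}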
Solution:
After fiddling with this for an entire day I finally found the fix. I had been following 《Hadoop集羣第七期》 (http://www.cnblogs.com/xia520pi/archive/2012/05/20/2510723.html), paying particular attention to lines 52 and 53, yet the job kept failing with the error shown above. Only after a long round of searching did I learn that the problem lies in hadoop-core-1.1.2.jar itself: the culprit is the checkReturnValue method of the FileUtil class in org.apache.hadoop.fs. That method checks file permissions, which fails under Windows; under Linux it runs normally and the problem never appears. Commenting the method out is enough to fix it, but for convenience I instead downloaded a pre-patched hadoop-core-1.1.2.jar from the web, replaced the original hadoop-core-1.1.2.jar with it, and ran wordcount.java from Eclipse again; this time it succeeded.
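For reference, the widely shared patch simply swallows the failed permission call instead of throwing. A sketch reconstructed from the Hadoop 1.x sources, not verified against this exact jar, with the rest of the class omitted:

package org.apache.hadoop.fs;

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.permission.FsPermission;

// Sketch of the patched method from FileUtil (everything else omitted).
class FileUtilPatchSketch {
    private static void checkReturnValue(boolean rv, File p,
                                         FsPermission permission
                                         ) throws IOException {
        // The stock Hadoop 1.x body throws when setting permissions fails,
        // which it routinely does on Windows:
        //
        //   if (!rv) {
        //     throw new IOException("Failed to set permissions of path: " + p
        //         + " to " + String.format("%04o", permission.toShort()));
        //   }
        //
        // The patched jar leaves the method empty, so the return value is
        // simply ignored and the job proceeds.
    }
}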
For the record:
Summary: what people post online is a mixed bag, so you have to learn to get hands-on and solve problems yourself. I am leaving the download link for the patched hadoop-core-1.1.2.jar here; anyone who needs it can download it and try replacing the original jar. Heh!