(1) Create a file named hello.txt in a working folder with the following content:
python hello
java python
c++ java
python php
(2) Next, write an entry-level MapReduce program. A MapReduce program consists of three parts: a Mapper, a Reducer, and a Driver.
This project is built with Maven; the dependencies section of pom.xml is as follows.
<dependencies>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>RELEASE</version>
    </dependency>
    <dependency>
        <groupId>org.apache.logging.log4j</groupId>
        <artifactId>log4j-core</artifactId>
        <version>2.8.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.7.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.7.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>2.7.2</version>
    </dependency>
</dependencies>
(3) Create a new file named "log4j.properties" under the project's src/main/resources directory, with the following content:
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
(4) Write the Mapper class.
package com.sk;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class WcMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    // The input key/value types are LongWritable and Text: the key is the byte offset
    // of the line from the start of the file, and the value is the content of that line.
    // These two fields are reused for every output record, which avoids creating a
    // large number of new objects.
    private Text word = new Text();
    private IntWritable one = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Get this line of input
        String line = value.toString();
        // Split the line on spaces
        String[] words = line.split(" ");
        // Iterate over the words and emit each one as (word, 1) to the framework
        for (String w : words) {
            this.word.set(w);
            context.write(this.word, this.one);
        }
    }
}
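As an illustration, for the first line of hello.txt ("python hello") this map method is called once with offset 0 as the key and the line as the value, and it emits the pairs (python, 1) and (hello, 1); the framework then groups all emitted values by key before handing them to the reducer.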
(5) Write the Reducer class.
package com.sk;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WcReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    // Reused output value object, same idea as in the Mapper
    private IntWritable total = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Accumulate the counts for this word
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        // Wrap the result and write it out
        total.set(sum);
        context.write(key, total);
    }
}
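Continuing the example, for the key "python" the reducer receives the grouped values (python, [1, 1, 1]), sums them, and writes (python, 3) to the output.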
(6) Write the Driver class.
package com.sk;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class WcDriver {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // 1. Get a Job instance
        Job job = Job.getInstance(new Configuration());
        // 2. Set the jar by the driver class so the framework can locate it
        job.setJarByClass(WcDriver.class);
        // 3. Set the Mapper and Reducer classes
        job.setMapperClass(WcMapper.class);
        job.setReducerClass(WcReducer.class);
        // 4. Set the map output and final output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 5. Set the input and output paths from the program arguments
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // 6. Submit the job and wait for it to finish
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }
}
To run the job from the IDE, open the run configuration for WcDriver and supply two program arguments: the input path (the folder containing hello.txt) and an output path that does not exist yet; they are read as args[0] and args[1]. If the output path already exists, Hadoop refuses to start the job. Alternatively, package the project into a jar and run it on a cluster with the hadoop jar command. The expected result for the sample input is shown below.
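Assuming the default TextOutputFormat, the job writes a file such as part-r-00000 into the output directory; for the hello.txt above its content should look roughly like this (word and count separated by a tab, keys in sorted order):
c++	1
hello	1
java	2
php	1
python	3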
This is an entry-level MapReduce program; make sure you understand what each step does.