七、MapReduce第七講合表(Join操作)
通俗的講就是把兩個文件的內容合到一塊。話不多說,我直接上案例
一、準備兩個數據文件:
data.txt:
201001 1003 abc
201002 1005 def
201003 1006 ghi
201004 1003 jkl
201005 1004 mno
201006 1005 pqr
info.txt:
1003 kaka
1004 da
1005 jue
1006 zhao
得出的數據文件(按 join 鍵排序,字段間以製表符分隔;同一鍵內的行順序可能略有不同):
part-r-00000:
201001	1003	abc	kaka
201004	1003	jkl	kaka
201005	1004	mno	da
201002	1005	def	jue
201006	1005	pqr	jue
201003	1006	ghi	zhao
代碼如下:
package Join;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Vector;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * Reduce-side join of two tab-separated input files.
 *
 * <p>data.txt rows look like {@code id \t joinKey \t payload}; info.txt rows look
 * like {@code joinKey \t name}. The mapper tags each record with its source file
 * and emits it under the join key; the reducer pairs every data row with every
 * info row sharing that key (a per-key cartesian product).
 *
 * <p>NOTE(review): class name kept lowercase ({@code join}) for compatibility with
 * existing callers, although Java convention would be {@code Join}.
 */
public class join {

    // Source tags prepended to mapper output values so the reducer can tell the
    // two inputs apart. Shared by MMapper and MReduce to avoid drift.
    private static final String DATA_TAG = "data";
    private static final String INFO_TAG = "info";

    /**
     * Configures and submits the join job.
     *
     * @param args args[0] = data.txt input path, args[1] = info.txt input path,
     *             args[2] = output directory (must not already exist)
     * @throws Exception if job setup or execution fails
     */
    public static void main(String[] args) throws Exception {
        if (args.length < 3) {
            // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
            System.err.println("Usage: join <data input> <info input> <output dir>");
            System.exit(2);
        }
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "reduce-side join");
        job.setJarByClass(join.class);
        job.setMapperClass(MMapper.class);
        job.setReducerClass(MReduce.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileInputFormat.addInputPath(job, new Path(args[1]));
        FileOutputFormat.setOutputPath(job, new Path(args[2]));
        // Propagate job success/failure to the shell instead of discarding it.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    /**
     * Tags each input record with its originating file and emits it keyed on the
     * join key so records from both files meet in the same reduce call.
     */
    public static class MMapper extends Mapper<LongWritable, Text, Text, Text> {

        // Reused output holders — avoids allocating two Text objects per record
        // (standard Hadoop mapper idiom).
        private final Text outKey = new Text();
        private final Text outVal = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Determine which input file this split came from.
            FileSplit inputSplit = (FileSplit) context.getInputSplit();
            String path = inputSplit.getPath().toString();
            String[] fields = value.toString().split("\t");
            if (path.contains("data.txt")) {
                // data.txt: id \t joinKey \t payload — silently skip malformed lines
                // rather than crashing the task on bad input.
                if (fields.length < 3) {
                    return;
                }
                outKey.set(fields[1]);
                outVal.set(DATA_TAG + fields[0] + "\t" + fields[1] + "\t" + fields[2]);
                context.write(outKey, outVal);
            } else if (path.contains("info.txt")) {
                // info.txt: joinKey \t name
                if (fields.length < 2) {
                    return;
                }
                outKey.set(fields[0]);
                outVal.set(INFO_TAG + fields[1]);
                context.write(outKey, outVal);
            }
        }
    }

    /**
     * Joins the tagged records sharing one key: every data-side row is paired
     * with every info-side row (cartesian product), preserving the original
     * output format (data fields as key, info name as value).
     */
    public static class MReduce extends Reducer<Text, Text, Text, Text> {

        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // Partition the tagged values by originating file. Plain ArrayLists:
            // no synchronization is needed inside a single reduce call, so the
            // legacy Vector type is unnecessary.
            List<String> dataRows = new ArrayList<>();
            List<String> infoRows = new ArrayList<>();
            for (Text v : values) {
                String line = v.toString();
                if (line.startsWith(DATA_TAG)) {
                    dataRows.add(line.substring(DATA_TAG.length()));
                } else if (line.startsWith(INFO_TAG)) {
                    infoRows.add(line.substring(INFO_TAG.length()));
                }
            }
            // Emit the cartesian product of the two sides for this key.
            for (String a : dataRows) {
                for (String b : infoRows) {
                    context.write(new Text(a), new Text(b));
                }
            }
        }
    }
}
本次教程就到此結束,有什麼不懂的多多在下方進行評論,博主有時間的話會在下方進行回答的。多多支持博主。
下期見!!!