MainTest.java
package MyinputFormat;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
 * @author carssun 2018-03-19 18:41:39
 * Driver for the custom InputFormat demo (reads each small file whole).
 */
public class MainTest extends Configured implements Tool {
public static void main(String[] args) throws Exception {
int exitCode = ToolRunner.run(new MainTest(), args);
System.exit(exitCode);
}
private static Text filename;//set once per split in setup(); holds the current file's path
public static class MyInMapper extends Mapper<NullWritable, Text, Text, Text>{
@Override
protected void setup(Mapper<NullWritable, Text, Text, Text>.Context context)
throws IOException, InterruptedException {
InputSplit split=context.getInputSplit();
Path filePath=((FileSplit)split).getPath();
filename=new Text(filePath.toString());//note: try getName() to keep only the file name instead of the full path
}
@Override
protected void map(NullWritable key, Text value, Mapper<NullWritable, Text, Text, Text>.Context context)
throws IOException, InterruptedException {
context.write(filename, value);
System.err.println("read one record");//debug: fires once per file
}
}
//the reducer just writes the value through unchanged
public static class MyInReducer extends Reducer<Text, Text, Text, NullWritable>{
@Override
protected void reduce(Text key, Iterable<Text> value, Context context)
throws IOException, InterruptedException {
context.write(value.iterator().next(),NullWritable.get());//no loop needed: each key (file path) has exactly one value
}
}
@Override
public int run(String[] args) throws Exception {
Configuration conf = getConf();//use the conf ToolRunner prepared so generic -D options are honored
Job job = Job.getInstance(conf);
job.setJarByClass(MainTest.class);
job.setMapperClass(MyInMapper.class);
job.setReducerClass(MyInReducer.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(NullWritable.class);
Path inputPath=new Path("D:/result/small");
Path outputPath=new Path("D:/result/small/output");
FileInputFormat.setInputPaths(job, inputPath);
FileOutputFormat.setOutputPath(job,outputPath);
job.setNumReduceTasks(1);//must be > 0, otherwise MyInReducer never runs and the raw map output is written instead
job.setInputFormatClass(WholeInputFormat.class);
FileSystem fs=FileSystem.get(conf);
if(fs.exists(outputPath)){
fs.delete(outputPath,true);
}
int status = job.waitForCompletion(true) ? 0 : 1;
return status;
}
}
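One note on the driver before the reader class: because it runs through ToolRunner, generic options are already parsed for us, and the only thing tying it to this Windows test setup is the pair of hard-coded D:/ paths. A minimal sketch of the argument-driven variant (the two-argument usage is an assumption, not part of the original):

//Hypothetical replacement for the two hard-coded paths inside run();
//assumed usage: hadoop jar myjob.jar MyinputFormat.MainTest <in> <out>
Path inputPath=new Path(args[0]);//first remaining argument: input directory
Path outputPath=new Path(args[1]);//second remaining argument: output directory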
MyReaderRecorder.java
package MyinputFormat;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
/**
 * @author lhx 2018-03-19 20:37:16
 * RecordReader that emits the entire content of one file as a single record.
 */
public class MyReaderRecorder extends RecordReader<NullWritable, Text>{
//four member fields
private Configuration conf=null;//job configuration
private FileSplit split=null;//the file split (one whole file) being read
private boolean process=false;//whether the single record has been emitted yet
private Text value=new Text();
@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
conf=context.getConfiguration();
this.split=(FileSplit)split;
}
// nextKeyValue() is the most important method of a RecordReader: the actual file-reading logic lives here,
// so writing a custom RecordReader mainly means overriding nextKeyValue() (see the driving sketch after this class)
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
if(!process){
//allocate a buffer the size of the whole file
byte[] contents=new byte[(int)split.getLength()];//a file really can exceed Integer.MAX_VALUE bytes
//the cast is forced by the byte[] API; a file that large simply cannot be read whole like this
FileSystem fs=FileSystem.get(conf);
Path filePath=split.getPath();//get the file path from the FileSplit
FSDataInputStream inputStream=fs.open(filePath);//open an HDFS read stream on the file
//read the whole file into the buffer
IOUtils.readFully(inputStream, contents, 0, contents.length);
//once the contents are in the buffer, copy them into value
value.set(contents,0,contents.length);
IOUtils.closeStream(inputStream);//close the input stream
process=true;//mark the single record as emitted
return true;
}
return false;
}
@Override
public NullWritable getCurrentKey() throws IOException, InterruptedException {
return NullWritable.get();
}
@Override
public Text getCurrentValue() throws IOException, InterruptedException {
return value;
}
@Override
public float getProgress() throws IOException, InterruptedException {
return process?1.0f:0.0f;//progress is all-or-nothing: 100% once the record is emitted, otherwise 0%
}
@Override
public void close() throws IOException {
// nothing to release: the stream is already closed in nextKeyValue()
}
}
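To make the nextKeyValue() contract above concrete, here is a minimal sketch (my own illustration, not part of the job) of the call sequence the framework performs for any RecordReader; with this reader the loop body runs exactly once per file:

package MyinputFormat;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
//Sketch only: split and context are assumed to be supplied by the framework,
//which is what really drives this sequence inside a map task.
public class ReaderContractSketch {
	static void drive(InputSplit split, TaskAttemptContext context) throws Exception {
		RecordReader<NullWritable, Text> reader=new MyReaderRecorder();
		reader.initialize(split, context);//bind the reader to one split
		while(reader.nextKeyValue()){//true exactly once per file here
			NullWritable key=reader.getCurrentKey();
			Text wholeFile=reader.getCurrentValue();//the entire file contents
			//the framework feeds this pair to map(key, wholeFile, context)
		}
		reader.close();
	}
}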
WholeInputFormat.java
package MyinputFormat;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
public class WholeInputFormat extends FileInputFormat<NullWritable, Text>{
@Override
protected boolean isSplitable(JobContext context, Path filename) {//mark files as unsplittable so each one becomes exactly one split
return false;
}
@Override
public RecordReader<NullWritable, Text> createRecordReader(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
MyReaderRecorder reader=new MyReaderRecorder();
reader.initialize(split, context);//the framework calls initialize() again itself; doing it here too is harmless
return reader;
}
}
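Wiring the format into a job is the single setInputFormatClass line already shown in MainTest.run; a short recap of its effect, assuming N files under the input path:

//In the driver (see MainTest.run above): replace the default TextInputFormat
job.setInputFormatClass(WholeInputFormat.class);
//Because isSplitable() returns false, getSplits() yields one unsplit FileSplit
//per input file, so each MyReaderRecorder reads exactly one whole file.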