Hadoop: fixing garbled Chinese output from MapReduce (Writable)

Hadoop writes text output as UTF-8 without a BOM by default, but Windows defaults to GBK for Chinese text. When such a BOM-less UTF-8 file (a CSV, for example) is opened in Excel, the Chinese characters come out garbled, and the file only displays correctly in editors such as UltraEdit or Notepad. Changing Hadoop's output encoding to GBK is therefore a very common requirement.
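
The root cause is a plain charset mismatch between writer and reader. A minimal, self-contained sketch (the class name and sample string are illustrative only) that shows the same UTF-8 bytes decoded as GBK:

import java.nio.charset.Charset;

public class EncodingMismatchDemo {
    public static void main(String[] args) throws Exception {
        String s = "中文输出测试";                        // some Chinese text
        byte[] utf8Bytes = s.getBytes("UTF-8");          // what Hadoop writes by default (no BOM)
        // A GBK-locale reader (e.g. Excel on Chinese Windows) decodes the BOM-less file as GBK:
        System.out.println(new String(utf8Bytes, Charset.forName("GBK"))); // garbled
        // Decoding with the same charset that produced the bytes is fine:
        System.out.println(new String(utf8Bytes, "UTF-8"));                // readable
    }
}
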
A custom output format modeled on TextOutputFormat.class

The source of TextOutputFormat.class, for reference:

package main.java.util;

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
/**
 *
 * <p>Title: fixing garbled Chinese in hadoop MapReduce output</p>
 * <p>Description:</p>
 * <p>Company: adteach</p>
 * @version 1.0
 */


@InterfaceAudience.Public
@InterfaceStability.Stable
public class TextOutputFormat<K, V> extends FileOutputFormat<K, V> {
  public static String SEPERATOR = "mapreduce.output.textoutputformat.separator";
  protected static class LineRecordWriter<K, V>
    extends RecordWriter<K, V> {
    private static final String utf8 = "UTF-8";  // 將UTF-8轉換成GBK
    private static final byte[] newline;
    static {
      try {
        newline = "\n".getBytes(utf8);
      } catch (UnsupportedEncodingException uee) {
        throw new IllegalArgumentException("can't find " + utf8 + " encoding");
      }
    }

    protected DataOutputStream out;
    private final byte[] keyValueSeparator;

    public LineRecordWriter(DataOutputStream out, String keyValueSeparator) {
      this.out = out;
      try {
        this.keyValueSeparator = keyValueSeparator.getBytes(utf8);
      } catch (UnsupportedEncodingException uee) {
        throw new IllegalArgumentException("can't find " + utf8 + " encoding");
      }
    }

    public LineRecordWriter(DataOutputStream out) {
      this(out, "\t");
    }

    /**
     * Write the object to the byte stream, handling Text as a special
     * case.
     * @param o the object to print
     * @throws IOException if the write throws, we pass it on
     */
    private void writeObject(Object o) throws IOException {
      if (o instanceof Text) {
        Text to = (Text) o;   // comment out this line in the custom class
        out.write(to.getBytes(), 0, to.getLength());  // comment out this line in the custom class
      } else { // comment out this line in the custom class
        out.write(o.toString().getBytes(utf8));
      }
    }

    public synchronized void write(K key, V value)
      throws IOException {

      boolean nullKey = key == null || key instanceof NullWritable;
      boolean nullValue = value == null || value instanceof NullWritable;
      if (nullKey && nullValue) {
        return;
      }
      if (!nullKey) {
        writeObject(key);
      }
      if (!(nullKey || nullValue)) {
        out.write(keyValueSeparator);
      }
      if (!nullValue) {
        writeObject(value);
      }
      out.write(newline);
    }

    public synchronized
    void close(TaskAttemptContext context) throws IOException {
      out.close();
    }
  }

  public RecordWriter<K, V>
         getRecordWriter(TaskAttemptContext job
                         ) throws IOException, InterruptedException {
    Configuration conf = job.getConfiguration();
    boolean isCompressed = getCompressOutput(job);
    String keyValueSeparator= conf.get(SEPERATOR, "\t");
    CompressionCodec codec = null;
    String extension = "";
    if (isCompressed) {
      Class<? extends CompressionCodec> codecClass =
        getOutputCompressorClass(job, GzipCodec.class);
      codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
      extension = codec.getDefaultExtension();
    }
    Path file = getDefaultWorkFile(job, extension);
    FileSystem fs = file.getFileSystem(conf);
    if (!isCompressed) {
      FSDataOutputStream fileOut = fs.create(file, false);
      return new LineRecordWriter<K, V>(fileOut, keyValueSeparator);
    } else {
      FSDataOutputStream fileOut = fs.create(file, false);
      return new LineRecordWriter<K, V>(new DataOutputStream
                                        (codec.createOutputStream(fileOut)),
                                        keyValueSeparator);
    }
  }
}

 

By default, the MR driver sets the output format with:
  job.setOutputFormatClass(TextOutputFormat.class);

Note, however:

As the utf8 constant in the code above shows, Hadoop hard-codes this output format's encoding to UTF-8. Since that constant is a private static final field of the nested LineRecordWriter, it cannot simply be overridden in a subclass of TextOutputFormat; instead, define a class GbkOutputFormat that copies TextOutputFormat and likewise extends FileOutputFormat
(note: org.apache.hadoop.mapreduce.lib.output.FileOutputFormat),
as in the code below.

package main.java.util;


import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
/**
 * 
 * <p>
 * Title: GbkOutputFormat
 * </p>
 * <p>
 * Description:
 * Hadoop hard-codes TextOutputFormat's output encoding to UTF-8, so to change the encoding
 * of the job's text output we define GbkOutputFormat, a copy of TextOutputFormat that
 * likewise extends FileOutputFormat (note: org.apache.hadoop.mapreduce.lib.output.FileOutputFormat).
 * </p>
 * <p>
 * Company: adteach
 * </p>
 * @version 1.0
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class GbkOutputFormat<K, V> extends FileOutputFormat<K, V> {
  public static String SEPERATOR = "mapreduce.output.textoutputformat.separator";
  protected static class LineRecordWriter<K, V>
    extends RecordWriter<K, V> {
    private static final String utf8 = "GBK";
    private static final byte[] newline;
    static {
      try {
        newline = "\n".getBytes(utf8);
      } catch (UnsupportedEncodingException uee) {
        throw new IllegalArgumentException("can't find " + utf8 + " encoding");
      }
    }

    protected DataOutputStream out;
    private final byte[] keyValueSeparator;

    public LineRecordWriter(DataOutputStream out, String keyValueSeparator) {
      this.out = out;
      try {
        this.keyValueSeparator = keyValueSeparator.getBytes(utf8);
      } catch (UnsupportedEncodingException uee) {
        throw new IllegalArgumentException("can't find " + utf8 + " encoding");
      }
    }

    public LineRecordWriter(DataOutputStream out) {
      this(out, "\t");
    }

    /**
     * Write the object to the byte stream, handling Text as a special
     * case.
     * @param o the object to print
     * @throws IOException if the write throws, we pass it on
     */
    private void writeObject(Object o) throws IOException {
      // Unlike TextOutputFormat, do not write Text's raw (UTF-8) bytes;
      // re-encode every value, Text included, to GBK via toString().
      out.write(o.toString().getBytes(utf8));
    }

    public synchronized void write(K key, V value)
      throws IOException {

      boolean nullKey = key == null || key instanceof NullWritable;
      boolean nullValue = value == null || value instanceof NullWritable;
      if (nullKey && nullValue) {
        return;
      }
      if (!nullKey) {
        writeObject(key);
      }
      if (!(nullKey || nullValue)) {
        out.write(keyValueSeparator);
      }
      if (!nullValue) {
        writeObject(value);
      }
      out.write(newline);
    }

    public synchronized 
    void close(TaskAttemptContext context) throws IOException {
      out.close();
    }
  }

  public RecordWriter<K, V> 
         getRecordWriter(TaskAttemptContext job
                         ) throws IOException, InterruptedException {
    Configuration conf = job.getConfiguration();
    boolean isCompressed = getCompressOutput(job);
    String keyValueSeparator= conf.get(SEPERATOR, "\t");
    CompressionCodec codec = null;
    String extension = "";
    if (isCompressed) {
      Class<? extends CompressionCodec> codecClass = 
        getOutputCompressorClass(job, GzipCodec.class);
      codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
      extension = codec.getDefaultExtension();
    }
    Path file = getDefaultWorkFile(job, extension);
    FileSystem fs = file.getFileSystem(conf);
    if (!isCompressed) {
      FSDataOutputStream fileOut = fs.create(file, false);
      return new LineRecordWriter<K, V>(fileOut, keyValueSeparator);
    } else {
      FSDataOutputStream fileOut = fs.create(file, false);
      return new LineRecordWriter<K, V>(new DataOutputStream
                                        (codec.createOutputStream(fileOut)),
                                        keyValueSeparator);
    }
  }
}

Finally, set the job's output format class to GbkOutputFormat.class:

job.setOutputFormatClass(GbkOutputFormat.class);

A complete driver example:

package main.java.demo;

import main.java.util.GbkOutputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;

public class DemoLogETL {

    // LongWritable is used instead of Long because map input and output must be serialized and
    // deserialized: map results are written to disk, and reduce deserializes them when reading.
    // The data types implement the Writable interface so they can be serialized for network transfer and file storage.
    // Static nested class (a static nested class can only access static members of the outer class).
    public static class WordCountMapper extends Mapper<LongWritable, Text, LongWritable, Text> { // first two type parameters: input key/value; last two: output key/value
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            if(null != value){
//                String row = value.toString(); // calling toString() directly garbles non-UTF-8 input
//                String row = new String(value.getBytes(), 0, value.getLength(), "GBK"); // for a GBK-encoded input file, e.g. input\jsonLog1.txt
                String row = new String(value.getBytes(), 0, value.getLength(), "utf-8"); // for a UTF-8-encoded input file, e.g. input\jsonLog.txt
                String sub = row.substring(20, row.length() - 20);
                context.write(key,new Text(sub));
            }
        }

    }


    // driver (pulled out into its own run() method)
    public int run(String[] args) throws Exception {
        //1.get configuration
        Configuration conf = new Configuration();
//        conf.set("mapreduce.framework.name", "local");

        //2.create job
        Job job = Job.getInstance(conf,this.getClass().getSimpleName());
        // run jar
        job.setJarByClass(DemoLogETL.class);

        //3.set job (input -> map -> reduce -> output)
        //3.1 map
        job.setMapperClass(WordCountMapper.class);
        // set the map output key and value classes
        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputFormatClass(GbkOutputFormat.class);

        //3.3
        //input path
        FileInputFormat.addInputPath(job,new Path("input\\jsonLog.txt"));
        //output path
        FileOutputFormat.setOutputPath(job,new Path("DemoLogETL1"));
        // submit the job
        boolean isSuccess = job.waitForCompletion(true); // true: print job progress to the console
        System.out.println(isSuccess);
        return isSuccess ? 0 : 1;

    }


    // entry point: run the whole job
    public static void main(String[] args) throws Exception{
            int status = new DemoLogETL().run(args);
            // exit with the job status
            System.exit(status);
    }

}
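
After the job finishes, a quick way to confirm that the output really is GBK is to read the part file back with both charsets; only GBK should yield readable Chinese. A minimal sketch, assuming Hadoop's usual output layout (the part file is typically DemoLogETL1/part-r-00000, or part-m-00000 for a map-only run; adjust the path as needed):

import java.nio.file.Files;
import java.nio.file.Paths;

public class CheckOutputEncoding {
    public static void main(String[] args) throws Exception {
        // Path assumed from the driver above; change it if your output name differs.
        byte[] bytes = Files.readAllBytes(Paths.get("DemoLogETL1/part-r-00000"));
        System.out.println("decoded as GBK  : " + new String(bytes, "GBK"));   // should be readable
        System.out.println("decoded as UTF-8: " + new String(bytes, "UTF-8")); // should be garbled
    }
}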

Reference: https://blog.csdn.net/u014033218/article/details/75413332

 

 

 
