Hadoop(Day07) -- MapReduce(Day 02), Eclipse

I. Versions:

1. MapReduce (v1.0): the classic runtime, in which a single JobTracker handles both cluster resource management and job scheduling/monitoring, and TaskTrackers execute the map and reduce tasks.

2. MapReduce (v2.0): MapReduce running on YARN, which splits those roles into a ResourceManager plus NodeManagers for resource management and a per-job ApplicationMaster for scheduling and monitoring.



II. Code:

Examples:

1. Counting phone traffic data (with a custom partitioner):

FlowBean.java:

package com.bsr.bigdata.mapreduce.part_pd;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

public class FlowBean implements Writable{
	
	private long upFlow;
	private long dFlow;
	private long sumFlow;
	
	//Deserialization instantiates the bean via reflection, which requires a no-arg constructor, so define one explicitly
	public FlowBean(){}
	
	public long getUpFlow() {
		return upFlow;
	}
	public void setUpFlow(long upFlow) {
		this.upFlow = upFlow;
	}
	public long getdFlow() {
		return dFlow;
	}
	public void setdFlow(long dFlow) {
		this.dFlow = dFlow;
	}
	public long getSumFlow() {
		return sumFlow;
	}
	public void setSumFlow(long sumFlow) {
		this.sumFlow = sumFlow;
	}
	public FlowBean(long upFlow, long dFlow) {
		super();
		this.upFlow = upFlow;
		this.dFlow = dFlow;
		this.sumFlow = upFlow + dFlow;
	}
	
	/**
	 * Serialization method
	 */
	@Override
	public void write(DataOutput out) throws IOException {
		out.writeLong(upFlow);
		out.writeLong(dFlow);
		out.writeLong(sumFlow);		
	}
	
	/**
	 * Deserialization method
	 * Note: fields must be read back in exactly the same order they were written
	 */
	@Override
	public void readFields(DataInput in) throws IOException {
		upFlow = in.readLong();
		dFlow = in.readLong();
		sumFlow = in.readLong();
	}

	@Override
	public String toString() {
		return upFlow + "\t" + dFlow + "\t" + sumFlow;
	}
	
	
	
	
}
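
The note in readFields above is the key constraint of the Writable contract: fields must be read back in exactly the same order write() emitted them. A minimal standalone round-trip sketch (illustration only, not part of the job; the class name FlowBeanRoundTrip is made up here), assuming the FlowBean above is on the classpath:

FlowBeanRoundTrip.java (illustration only):

package com.bsr.bigdata.mapreduce.part_pd;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanRoundTrip {
	public static void main(String[] args) throws IOException {
		FlowBean original = new FlowBean(1024, 4096);

		//Serialize into a byte buffer, as the framework does between map and reduce
		ByteArrayOutputStream buffer = new ByteArrayOutputStream();
		original.write(new DataOutputStream(buffer));

		//Deserialize into a fresh bean created through the no-arg constructor
		FlowBean copy = new FlowBean();
		copy.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));

		System.out.println(copy); //prints: 1024	4096	5120
	}
}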
PhonePartitioner.java:

package com.bsr.bigdata.mapreduce.part_pd;

import java.util.HashMap;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

public class PhonePartitioner extends Partitioner<Text, FlowBean>{
	
	//Map phone-number prefixes (area codes) to partition ids 0-3; anything else falls through to 4
	public static HashMap<String, Integer> phoneDict = new HashMap<String, Integer>();
	static{
		phoneDict.put("0431", 0);
		phoneDict.put("0451", 1);
		phoneDict.put("0461", 2);
		phoneDict.put("0471", 3);
	}
	
	@Override
	public int getPartition(Text key, FlowBean value, int numPartitions) {
		//Look up the first four characters of the phone number
		String prefix = key.toString().substring(0, 4);
		Integer phoneId = phoneDict.get(prefix);
		//Unknown prefixes go to the extra partition 4
		return phoneId == null ? 4 : phoneId;
	}

}
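
A quick sanity check of that mapping (illustration only; the phone numbers and the class name PhonePartitionerCheck are made up): keys whose first four characters match a prefix in phoneDict land in partitions 0-3, everything else in the default partition 4.

PhonePartitionerCheck.java (illustration only):

package com.bsr.bigdata.mapreduce.part_pd;

import org.apache.hadoop.io.Text;

public class PhonePartitionerCheck {
	public static void main(String[] args) {
		PhonePartitioner partitioner = new PhonePartitioner();
		FlowBean dummy = new FlowBean(100, 200);

		//Listed prefix "0431" -> partition 0
		System.out.println(partitioner.getPartition(new Text("043188888888"), dummy, 5));
		//Unlisted prefix "0100" -> default partition 4
		System.out.println(partitioner.getPartition(new Text("010012345678"), dummy, 5));
	}
}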
FlowCount.java:

package com.bsr.bigdata.mapreduce.part_pd;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class FlowCount {
	
	static class FlowCountMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
		@Override
		protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException{
			//Convert the line of input into a String
			String line = value.toString();
			//Split into tab-separated fields
			String[] fields = line.split("\t");
			//Phone number
			String phoneNbr = fields[1];
			//Extract the upstream traffic
			long upFlow = Long.parseLong(fields[fields.length-3]);
			//Extract the downstream traffic
			long dFlow = Long.parseLong(fields[fields.length-2]);
			
			context.write(new Text(phoneNbr), new FlowBean(upFlow, dFlow));
		}
	}
	
	static class FlowCountReducer extends Reducer<Text, FlowBean, Text, FlowBean>{
		@Override
		protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException{
			long sum_upFlow = 0;
			long sum_dFlow = 0;
			//Iterate over all beans, accumulating the upstream and downstream traffic separately
			for(FlowBean bean:values){
				sum_upFlow += bean.getUpFlow();
				sum_dFlow += bean.getdFlow();
			}
			//Summarize all the traffic into a result bean
			FlowBean resultBean = new FlowBean(sum_upFlow, sum_dFlow);
			context.write(key, resultBean);
		}
	}
	public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
		Configuration conf = new Configuration();
		Job job = Job.getInstance(conf);
		//Specify the local path of the jar containing this program (via its driver class)
		job.setJarByClass(FlowCount.class);
		
		//Specify the Mapper/Reducer classes this job uses
		job.setMapperClass(FlowCountMapper.class);
		job.setReducerClass(FlowCountReducer.class);
		
		//Specify our custom partitioner
		job.setPartitionerClass(PhonePartitioner.class);
		//And set the number of reduce tasks to match the number of partitions (4 known prefixes + 1 default)
		job.setNumReduceTasks(5);
		
		//Directory containing the job's raw input files
		FileInputFormat.setInputPaths(job, new Path("D:\\flow\\in"));
		//Directory for the job's output (a separate path that must not already exist)
		FileOutputFormat.setOutputPath(job, new Path("D:\\flow\\out"));
		
		boolean res = job.waitForCompletion(true);
		System.exit(res?0:1);
	}

}
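
For reference, the Mapper expects tab-separated lines with the phone number in the second column and the upstream/downstream traffic in the third- and second-to-last columns. A tiny parsing sketch with a hypothetical record (the real log layout may differ; the class name FlowLineParseDemo is made up):

FlowLineParseDemo.java (illustration only):

package com.bsr.bigdata.mapreduce.part_pd;

public class FlowLineParseDemo {
	public static void main(String[] args) {
		//Hypothetical tab-separated record; only the marked columns are used by the Mapper
		String line = "1363157985066\t043188888888\t00-FD-07-A4-72-B8\t120.196.100.82\t24\t27\t2481\t24681\t200";
		String[] fields = line.split("\t");
		String phoneNbr = fields[1];                             //"043188888888"
		long upFlow = Long.parseLong(fields[fields.length - 3]); //2481
		long dFlow = Long.parseLong(fields[fields.length - 2]);  //24681
		System.out.println(phoneNbr + " -> " + new FlowBean(upFlow, dFlow)); //043188888888 -> 2481	24681	27162
	}
}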


2. WordCountCombiner (word count with a Combiner):

WordCountCombiner.java:

package com.bsr.bigdata.mapreduce.combiner;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;  
  
/** 
 * Q: Why use a Combiner?
 * A: The Combiner runs on the map side and pre-aggregates the data, so less data has to be
 *    shuffled to the reduce side; transfer time drops and the overall job finishes sooner.
 *
 * Q: Why is the Combiner an optional step rather than a standard part of every MR job?
 * A: Because not every computation can be combined safely; computing an average is the classic
 *    counter-example (see the small averaging sketch after this class).
 *
 * Q: The Combiner already performs a reduce-style operation, so why is the Reducer stage still needed?
 * A: The Combiner runs on the map side and only sees the data received by its own map task; it
 *    cannot aggregate across map tasks. Only the reducer receives the output of all map tasks.
 */  
public class WordCountCombiner {  
    /** 
     * KEYIN    i.e. k1    the byte offset of the line
     * VALUEIN  i.e. v1    the text content of the line
     * KEYOUT   i.e. k2    a word appearing in the line
     * VALUEOUT i.e. v2    the count for that word, fixed at 1
     */  
    static class MyMapper extends Mapper<LongWritable, Text, Text, LongWritable>{  
        protected void map(LongWritable k1, Text v1, Context context) throws java.io.IOException ,InterruptedException {  
            final String[] splited = v1.toString().split(" ");  
            for (String word : splited) {  
                context.write(new Text(word), new LongWritable(1));  
                System.out.println("Mapper輸出<"+word+","+1+">");  
            }  
        };  
    }  
      
    /** 
     * KEYIN    i.e. k2    a word appearing in the lines
     * VALUEIN  i.e. v2    the per-occurrence counts for that word
     * KEYOUT   i.e. k3    each distinct word in the text
     * VALUEOUT i.e. v3    the total number of occurrences of that word
     * 
     */  
    static class MyReducer extends Reducer<Text, LongWritable, Text, LongWritable>{  
        protected void reduce(Text k2, java.lang.Iterable<LongWritable> v2s, Context ctx) throws java.io.IOException ,InterruptedException {  
            //How many times this prints = how many times reduce() is called = how many k2 groups there are  
            System.out.println("MyReducer input group <"+k2.toString()+",...>");  
            long times = 0L;  
            for (LongWritable count : v2s) {  
                times += count.get();  
                //How many times this prints = how many <k2,v2> pairs were received  
                System.out.println("MyReducer input pair <"+k2.toString()+","+count.get()+">");  
            }  
            ctx.write(k2, new LongWritable(times));  
        };  
    }  
      
      
    static class MyCombiner extends Reducer<Text, LongWritable, Text, LongWritable>{  
        protected void reduce(Text k2, java.lang.Iterable<LongWritable> v2s, Context ctx) throws java.io.IOException ,InterruptedException {  
            //How many times this prints = how many times the combiner's reduce() is called = how many k2 groups this map task produced  
            System.out.println("Combiner input group <"+k2.toString()+",...>");  
            long times = 0L;  
            for (LongWritable count : v2s) {  
                times += count.get();  
                //How many times this prints = how many <k2,v2> pairs the combiner received  
                System.out.println("Combiner input pair <"+k2.toString()+","+count.get()+">");  
            }  
              
            ctx.write(k2, new LongWritable(times));  
            //How many times this prints = how many <k2,v2> pairs the combiner emitted  
            System.out.println("Combiner output pair <"+k2.toString()+","+times+">");  
        };  
    }   
    public static void main(String[] args) throws Exception {  
        Configuration conf = new Configuration();  
        
 
        Job job = Job.getInstance(conf);
          
        //Specify how the input files are parsed into key/value pairs, one pair per line (TextInputFormat is the default)  
        //job.setInputFormatClass(TextInputFormat.class);  
          
        //1.2 Specify the custom Mapper class  
        job.setMapperClass(MyMapper.class);  
        //The map output <k,v> types; these can be omitted when <k3,v3> have the same types as <k2,v2>  
        job.setMapOutputKeyClass(Text.class);  
        job.setMapOutputValueClass(LongWritable.class);  
         
         
        //1.5 Combine (map-side local aggregation)  
        job.setCombinerClass(MyCombiner.class);  
          
        //2.2 Specify the custom Reducer class  
        job.setReducerClass(MyReducer.class);  
        //Specify the reduce output types  
        job.setOutputKeyClass(Text.class);  
        job.setOutputValueClass(LongWritable.class);  
          
       //Directory containing the job's raw input files
       FileInputFormat.setInputPaths(job, new Path("D:\\word\\in"));
       //Directory for the job's output
       Path outPath = new Path("D:\\word\\out");
       
       FileOutputFormat.setOutputPath(job, outPath);
      
       FileSystem fs = FileSystem.get(conf);
      
       //Delete the output directory if it already exists, otherwise the job would fail on startup
       if(fs.exists(outPath)){  
           fs.delete(outPath, true);  
       }  
      
       //Submit the job configuration and the jar containing the job's classes to the cluster (or local runner) and wait for completion
       /*job.submit();*/
       boolean res = job.waitForCompletion(true);
       System.exit(res?0:1);
    }  
}  
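
The averaging counter-example from the Q&A comment above, spelled out as a few lines of plain arithmetic (illustration only; AvgCombinerPitfall is a made-up class): averaging the per-map-task averages does not reproduce the overall average, which is why a mean cannot simply be pre-aggregated by a Combiner.

AvgCombinerPitfall.java (illustration only):

public class AvgCombinerPitfall {
    public static void main(String[] args) {
        double mapSideAvg1 = (1 + 2) / 2.0;                     //map task 1 sees {1, 2} -> 1.5
        double mapSideAvg2 = 3 / 1.0;                           //map task 2 sees {3}    -> 3.0
        double combinedAvg = (mapSideAvg1 + mapSideAvg2) / 2.0; //2.25
        double trueAvg = (1 + 2 + 3) / 3.0;                     //2.0
        System.out.println(combinedAvg + " != " + trueAvg);     //2.25 != 2.0
    }
}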


