Hadoop MapReduce: Custom Sort Programming

1. Problem Description

        From the data given below, compute each account's total income, total expenses, and total profit, then sort the accounts by total profit in descending order; if two accounts have the same total profit, sort them by total expenses in ascending order.

2. Data Format

       2.1 Input data format (tab-separated: account, income, expense, date)
       [email protected]    6000    0    2014-02-20
       [email protected]    0    1000    2014-02-20
       [email protected]    2000    1000    2014-02-20
       [email protected]    10000    9000    2014-02-20
       [email protected]    100    0    2014-02-20
       [email protected]    6000    2000    2014-02-20

       2.2 Output data format (account, total income, total expenses, total profit)
       For example, [email protected] appears twice in the input, so its totals come to 12000.0 income, 10000.0 expenses, and 12000.0 - 10000.0 = 2000.0 profit.
       [email protected]    6000.0    1000.0    5000.0
       [email protected]    6000.0    2000.0    4000.0
       [email protected]    12000.0    10000.0    2000.0
       [email protected]    100.0    0.0    100.0

3. Implementation

        Step 1: compute each account's total income, total expenses, and total profit, and write the result to HDFS. (This output is sorted by key, i.e. lexicographically by account, which is the framework's default.)

        Step 2: re-sort that output by the custom rule. MapReduce sorts map output by key, so the whole record becomes the map output key: an InfoBean that implements WritableComparable and encodes the ordering in compareTo().

        Class InfoBean:

package edu.jianwei.hadoop.mr.sort;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class InfoBean implements WritableComparable<InfoBean> {

	private String account;

	private double income;

	private double expenses;

	private double profit;

	public void set(String account, double income, double expenses) {
		this.account = account;
		this.income = income;
		this.expenses = expenses;
		this.profit = income - expenses;
	}

	@Override
	public String toString() {
		return this.income + "\t" + this.expenses + "\t" + this.profit;
	}

	/**
	 * Serialize; fields must be written in the same order readFields() reads them.
	 */
	public void write(DataOutput out) throws IOException {
		out.writeUTF(account);
		out.writeDouble(income);
		out.writeDouble(expenses);
		out.writeDouble(profit);
	}

	/**
	 * Deserialize; field order must mirror write().
	 */
	public void readFields(DataInput in) throws IOException {
		this.account = in.readUTF();
		this.income = in.readDouble();
		this.expenses = in.readDouble();
		this.profit = in.readDouble();
	}

	public int compareTo(InfoBean o) {
		// Primary sort: total profit, descending.
		if (this.profit != o.getProfit()) {
			return this.profit > o.getProfit() ? -1 : 1;
		}
		// Secondary sort: total expenses, ascending.
		if (this.expenses != o.getExpenses()) {
			return this.expenses < o.getExpenses() ? -1 : 1;
		}
		// Tie-break on the account so the comparison is consistent and
		// distinct accounts are never treated as equal keys.
		return this.account.compareTo(o.getAccount());
	}

	public String getAccount() {
		return account;
	}

	public void setAccount(String account) {
		this.account = account;
	}

	public double getIncome() {
		return income;
	}

	public void setIncome(double income) {
		this.income = income;
	}

	public double getExpenses() {
		return expenses;
	}

	public void setExpenses(double expenses) {
		this.expenses = expenses;
	}

	public double getProfit() {
		return profit;
	}

	public void setProfit(double profit) {
		this.profit = profit;
	}

}
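
        Since Hadoop serializes the bean between the map and reduce phases, it is worth sanity-checking that write() and readFields() mirror each other. A minimal local round-trip test (the test class and sample values here are illustrative, not part of the job):

package edu.jianwei.hadoop.mr.sort;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

public class InfoBeanRoundTripTest {
	public static void main(String[] args) throws Exception {
		InfoBean original = new InfoBean();
		original.set("[email protected]", 6000, 1000);

		// Serialize the bean the same way Hadoop would during the shuffle.
		ByteArrayOutputStream bytes = new ByteArrayOutputStream();
		original.write(new DataOutputStream(bytes));

		// Deserialize into a fresh instance and print the recovered fields.
		InfoBean copy = new InfoBean();
		copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

		// Expected: [email protected]	6000.0	1000.0	5000.0
		System.out.println(copy.getAccount() + "\t" + copy);
	}
}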

         Class SumStep:
package edu.jianwei.hadoop.mr.sort;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SumStep {
	static class SumMapper extends Mapper<LongWritable, Text, Text, InfoBean> {
		public Text k = new Text();
		public InfoBean v = new InfoBean();

		@Override
		protected void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			String line = value.toString();
			String[] str = line.split("\t");
			String account = str[0];
			double income = Double.parseDouble(str[1]);
			double expenses = Double.parseDouble(str[2]);
			k.set(account);
			v.set(account, income, expenses);
			context.write(k, v);
		}

	}

	static class SumReducer extends Reducer<Text, InfoBean, Text, InfoBean> {

		public InfoBean v = new InfoBean();

		@Override
		protected void reduce(Text key, Iterable<InfoBean> values,
				Context context) throws IOException, InterruptedException {
			double totalIncome = 0;
			double totalExpenses = 0;
			for (InfoBean bean : values) {
				totalIncome += bean.getIncome();
				totalExpenses += bean.getExpenses();
			}
			// The account is carried by the Text key, so the bean does not
			// need it here (toString() omits it anyway).
			v.set(null, totalIncome, totalExpenses);
			context.write(key, v);
		}
	}

	public static void main(String[] args) throws IllegalArgumentException,
			IOException, InterruptedException, ClassNotFoundException {
		Configuration conf = new Configuration();
		Job job = Job.getInstance(conf);

		job.setJarByClass(SumStep.class);

		job.setMapperClass(SumMapper.class);
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(InfoBean.class);
		FileInputFormat.setInputPaths(job, new Path(args[0]));

		job.setReducerClass(SumReducer.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(InfoBean.class);
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		job.waitForCompletion(true);
	}

}
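
        Because the per-account sums are associative, SumReducer could also be registered as a combiner (job.setCombinerClass(SumReducer.class)) to cut shuffle traffic. One caveat: a combiner's output is re-serialized, and since the reducer calls v.set(null, ...), write() would hit writeUTF(null) and throw a NullPointerException. A sketch of the combiner-safe variant (an optional change, not part of the original job):

		// In SumReducer.reduce(): keep the account in the bean so that
		// write() never sees a null if the class is reused as a combiner.
		v.set(key.toString(), totalIncome, totalExpenses);
		context.write(key, v);

		// In main(), after setMapperClass(...):
		job.setCombinerClass(SumReducer.class); // valid because summing is associative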
        

        Class SortStep:

package edu.jianwei.hadoop.mr.sort;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SortStep {

	public static void main(String[] args) throws IOException,
			InterruptedException, ClassNotFoundException {
		Configuration conf = new Configuration();
		Job job = Job.getInstance(conf);

		job.setJarByClass(SortStep.class);

		job.setMapperClass(SortMapper.class);
		job.setMapOutputKeyClass(InfoBean.class);
		job.setMapOutputValueClass(NullWritable.class);
		FileInputFormat.setInputPaths(job, new Path(args[0]));

		job.setReducerClass(SortReducer.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(InfoBean.class);
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		job.waitForCompletion(true);
	}

	public static class SortMapper extends
			Mapper<LongWritable, Text, InfoBean, NullWritable> {
		public InfoBean k = new InfoBean();

		@Override
		protected void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			String line = value.toString();
			String[] strs = line.split("\t");
			String account = strs[0];
			double income = Double.parseDouble(strs[1]);
			double expenses = Double.parseDouble(strs[2]);
			k.set(account, income, expenses);
			context.write(k, NullWritable.get());
		}
	}

	public static class SortReducer extends
			Reducer<InfoBean, NullWritable, Text, InfoBean> {
		public Text k = new Text();

		@Override
		protected void reduce(InfoBean bean, Iterable<NullWritable> v2s,
				Context context) throws IOException, InterruptedException {
			String account = bean.getAccount();
			k.set(account);
			context.write(k, bean);
		}

	}

}
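
        Both jobs run with the framework's default single reducer, which is what makes SortStep's output totally ordered; with several reducers you would additionally need a total-order partitioner. To launch the two steps as one submission instead of two hadoop jar commands, a minimal chained driver could look like this (the class name and intermediate path are illustrative assumptions, and error handling between the steps is omitted):

package edu.jianwei.hadoop.mr.sort;

public class SumThenSort {
	public static void main(String[] args) throws Exception {
		// args[0] = input dir, args[1] = final output dir; the intermediate
		// directory name is an arbitrary choice for this sketch.
		String intermediate = args[1] + "_sum";
		SumStep.main(new String[] { args[0], intermediate });  // step 1: aggregate
		SortStep.main(new String[] { intermediate, args[1] }); // step 2: custom sort
	}
}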

4. Results

        Step 1: compute each account's total income, total expenses, and total profit, and write the result to HDFS.

        1. Running the job

          hadoop jar /root/sort.jar edu.jianwei.hadoop.mr.sort.SumStep /sort  /sort/sum
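
        The aggregated result can then be inspected directly on HDFS (assuming the default output file name part-r-00000):

          hadoop fs -cat /sort/sum/part-r-00000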

        2. Output

        [email protected]    100.0    0.0    100.0
        [email protected]    12000.0    10000.0    2000.0
        [email protected]    6000.0    2000.0    4000.0
        [email protected]    6000.0    1000.0    5000.0

        Step 2: sort that output with the custom comparator.

       1. Running the job

         hadoop jar /root/sort.jar edu.jianwei.hadoop.mr.sort.SortStep /sort/sum  /sort/sortRes

        2. Output

        [email protected]    6000.0    1000.0    5000.0
        [email protected]    6000.0    2000.0    4000.0
        [email protected]    12000.0    10000.0    2000.0
        [email protected]    100.0    0.0    100.0





