MapReduce II

Sorting

1. Partial sort
    The default behavior.
2. Total order sort
    1. Use a single reducer.
    2. Custom partitioner class
        May cause data skew.
    3. Use Hadoop's built-in total order partitioner class.
        Sampling.
        Partition file (SequenceFile).

3. Secondary sort
    Sort by value.
    Fold the value into the key, forming a composite key.

Data skew

A disproportionate amount of data flows to one or a few reducers, leaving the remaining reducers idle while those few do most of the work.
The number of reducers is decided by the program (job configuration), not by the framework.
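
A common mitigation (not covered in the original notes, shown here only as a sketch) is to scatter a known hot key across all reducers with a custom partitioner and merge that key's partial results in a second job. The hot key 1958 below is purely illustrative:

        package com.hadoop.mr.skew;

        import java.util.Random;

        import org.apache.hadoop.io.IntWritable;
        import org.apache.hadoop.mapreduce.Partitioner;

        /**
         * Sketch of a skew-mitigating partitioner: records for one known hot key are
         * spread over all reducers; a follow-up job must merge that key's partial results.
         */
        public class SkewPartitioner extends Partitioner<IntWritable, IntWritable> {

            private static final int HOT_KEY = 1958;   // assumed hot key, illustration only
            private final Random random = new Random();

            @Override
            public int getPartition(IntWritable key, IntWritable value, int numPartitions) {
                if (key.get() == HOT_KEY) {
                    // scatter the hot key across every reducer to balance the load
                    return random.nextInt(numPartitions);
                }
                // hash-style partitioning for all other keys
                return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
            }
        }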

Joins

[sql]
1. Cross join
    select a.*,b.* from customers a cross join orders b ;

2. Cartesian product (comma join)
    select a.*,b.* from customers a , orders b ;

3. Inner join
    select a.*,b.* from customers a inner join orders b on a.id = b.cid ;

4. Left outer join
    select a.*,b.* from customers a left outer join orders b on a.id = b.cid ;

5. Right outer join
    select a.*,b.* from customers a right outer join orders b on a.id = b.cid ;

[hadoop]
1. Map-side join
    Large table + small table (the small table is loaded into memory in each map task); see the sketch after this list.


2. Reduce-side join
    Large table + large table (the general case; records from both tables meet in the reducer on the join key).
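
A minimal map-side join sketch under assumed file layouts (customers: "id,name"; orders: "oid,price,cid") and an assumed small-table path "customers.txt"; the small table is loaded once in setup() and each orders record is joined in map(), so no reduce phase is needed:

        package com.hadoop.mr.join;

        import java.io.BufferedReader;
        import java.io.IOException;
        import java.io.InputStreamReader;
        import java.util.HashMap;
        import java.util.Map;

        import org.apache.hadoop.fs.FileSystem;
        import org.apache.hadoop.fs.Path;
        import org.apache.hadoop.io.LongWritable;
        import org.apache.hadoop.io.NullWritable;
        import org.apache.hadoop.io.Text;
        import org.apache.hadoop.mapreduce.Mapper;

        /**
         * Map-side join: small customers table cached in memory, orders streamed through map().
         */
        public class MapJoinMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

            private final Map<String, String> customers = new HashMap<String, String>();

            @Override
            protected void setup(Context context) throws IOException, InterruptedException {
                // load the small table once per map task (the path is an assumption)
                FileSystem fs = FileSystem.get(context.getConfiguration());
                BufferedReader reader = new BufferedReader(
                        new InputStreamReader(fs.open(new Path("customers.txt"))));
                String line;
                while ((line = reader.readLine()) != null) {
                    String[] arr = line.split(",");           // id,name
                    customers.put(arr[0], arr[1]);
                }
                reader.close();
            }

            @Override
            protected void map(LongWritable key, Text value, Context context)
                    throws IOException, InterruptedException {
                String[] order = value.toString().split(","); // oid,price,cid
                String name = customers.get(order[2]);        // look up the customer by cid
                if (name != null) {
                    context.write(new Text(order[0] + "," + order[1] + "," + name), NullWritable.get());
                }
            }
        }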

Sorting

1. Partial sort

    Nothing to do: this is the default behavior.
    All keys aggregated within each reducer are already sorted.

2. Total order sort

    Sort all keys across the entire reduce output, not just within each reducer.
    2.1) Use a single reducer
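
        With one reducer every key goes to the same task, so that reducer's sorted output is already the global order. The only job setting needed (a minimal sketch):

            job.setNumReduceTasks(1);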

    2.2) Custom partitioner class
        a) Create the class
                package com.hadoop.mr.sort.total;

                import org.apache.hadoop.io.IntWritable;
                import org.apache.hadoop.mapreduce.Partitioner;

                /**
                 * Custom partitioner implementing total order sort by year range:
                 * years before 1930 go to partition 0, years after 1960 to partition 2, the rest to partition 1.
                 */
                public class YearPartitioner extends Partitioner<IntWritable, IntWritable> {

                    public int getPartition(IntWritable key, IntWritable value, int numPartitions) {
                        int year = key.get();
                        if(year < 1930){
                            return 0 ;
                        }
                        else if(year > 1960) {
                            return 2 ;
                        }
                        return 1 ;
                    }
                }
            b) Configure the job (App)
                job.setPartitionerClass(YearPartitioner.class);
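
            The partitioner above can return partition numbers 0, 1 and 2, so the job also needs three reducers; with fewer, records assigned to the higher partition numbers fail with an "Illegal partition" error (and with a single reducer the custom partitioner is not consulted at all):

                job.setNumReduceTasks(3);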

    2.3) Use sampling (TotalOrderPartitioner)
        Sample the input data, analyze the key distribution, and derive the partition boundaries.
        The sampling code must be called after the job has been configured, because the sampler reads the job's conf settings.
            public static void main(String[] args) throws Exception {
                args = new String[]{"d:/java/mr/data/temp.seq", "d:/java/mr/out"};
                Configuration conf = new Configuration();
                FileSystem fs = FileSystem.get(conf);
                if(fs.exists(new Path(args[1]))){
                    fs.delete(new Path(args[1]),true);
                }

                Job job = Job.getInstance(conf);

                job.setJobName("maxTemp");
                job.setJarByClass(App.class);

                job.setMapperClass(MaxTempMapper.class);
                job.setReducerClass(MaxTempReducer.class);

                FileInputFormat.addInputPath(job,new Path(args[0]));
                FileOutputFormat.setOutputPath(job,new Path(args[1]));
                //set the SequenceFile input format
                job.setInputFormatClass(SequenceFileInputFormat.class);
                job.setPartitionerClass(TotalOrderPartitioner.class);

                job.setNumReduceTasks(3);

                job.setMapOutputKeyClass(IntWritable.class);
                job.setMapOutputValueClass(IntWritable.class);

                job.setOutputKeyClass(IntWritable.class);
                job.setOutputValueClass(IntWritable.class);

                TotalOrderPartitioner.setPartitionFile(job.getConfiguration(),new Path("file:///d:/java/mr/par.seq"));
                //random sampler: (freq=1.0, numSamples=5, maxSplitsSampled=3)
                InputSampler.RandomSampler<IntWritable,IntWritable> r = new InputSampler.RandomSampler<IntWritable, IntWritable>(1f,5,3);
                //write the partition file read by TotalOrderPartitioner
                InputSampler.writePartitionFile(job,r);

                job.waitForCompletion(true);
            }
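
    The example jobs here read d:/java/mr/data/temp.seq. A minimal sketch for generating such a file with random (year, temperature) pairs, assuming the IntWritable/IntWritable layout implied by the mapper and reducer above:

        package com.hadoop.mr.sort.total;

        import java.util.Random;

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.fs.Path;
        import org.apache.hadoop.io.IntWritable;
        import org.apache.hadoop.io.SequenceFile;

        /**
         * Generates a SequenceFile of (year, temperature) IntWritable pairs for testing.
         */
        public class GenTempSeq {
            public static void main(String[] args) throws Exception {
                Configuration conf = new Configuration();
                SequenceFile.Writer writer = SequenceFile.createWriter(conf,
                        SequenceFile.Writer.file(new Path("d:/java/mr/data/temp.seq")),
                        SequenceFile.Writer.keyClass(IntWritable.class),
                        SequenceFile.Writer.valueClass(IntWritable.class));
                Random r = new Random();
                for (int i = 0; i < 10000; i++) {
                    int year = 1900 + r.nextInt(100);   // year in [1900, 1999]
                    int temp = -30 + r.nextInt(70);     // temperature in [-30, 39]
                    writer.append(new IntWritable(year), new IntWritable(temp));
                }
                writer.close();
            }
        }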

3. Secondary sort

    Secondary (auxiliary) sort: sort by value.
    The value is folded into a composite key so that the framework's key sort orders it.

3.1) Custom composite key

            package com.hadoop.mr.sort.secondary;

            import org.apache.hadoop.io.WritableComparable;

            import java.io.DataInput;
            import java.io.DataOutput;
            import java.io.IOException;

            /**
             * Composite key (year, temp): ascending by year, descending by temp within the same year.
             */
            public class CombKey implements WritableComparable<CombKey>{
                public int year ;
                public int temp ;

                public int compareTo(CombKey o) {
                    int oyear = o.year ;
                    int otemp = o.temp ;
                    //same year: order by temperature descending (largest first)
                    if(year == oyear){
                        return otemp - temp ;
                    }
                    else{
                        return year - oyear ;
                    }
                }

                public void write(DataOutput out) throws IOException {
                    out.writeInt(year);
                    out.writeInt(temp);
                }

                public void readFields(DataInput in) throws IOException {
                    this.year = in.readInt() ;
                    this.temp = in.readInt() ;

                }
            }
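
        CombKey gets away without hashCode()/equals() here because a custom partitioner (3.2) picks the partition. If the default HashPartitioner were used instead, logically equal keys would need a stable hash; an illustrative addition inside CombKey would be:

                @Override
                public int hashCode() {
                    return year * 31 + temp;
                }

                @Override
                public boolean equals(Object obj) {
                    if (!(obj instanceof CombKey)) {
                        return false;
                    }
                    CombKey o = (CombKey) obj;
                    return year == o.year && temp == o.temp;
                }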

3.2) Custom partitioner class

        Partition by the year field of CombKey.
            public class YearPartitioner extends Partitioner<CombKey, NullWritable> {
                public int getPartition(CombKey key, NullWritable nullWritable, int numPartitions) {
                    return key.year % numPartitions ;
                }
            }

3.3) Modified Mapper

            package com.hadoop.mr.sort.secondary;

            import org.apache.hadoop.io.IntWritable;
            import org.apache.hadoop.io.NullWritable;
            import org.apache.hadoop.mapreduce.Mapper;

            import java.io.IOException;

            /**
             * MaxTempMapper: wraps (year, temp) into a CombKey; the output value is NullWritable.
             */
            public class MaxTempMapper extends Mapper<IntWritable, IntWritable, CombKey, NullWritable> {

                protected void map(IntWritable key, IntWritable value, Context context) throws IOException, InterruptedException {
                    int year = key.get() ;
                    int temp = value.get() ;
                    CombKey keyOut = new CombKey() ;
                    keyOut.year= year ;
                    keyOut.temp = temp ;
                    context.write(keyOut,NullWritable.get());
                }
            }

3.4) CombKeyComparator

            package com.hadoop.mr.sort.secondary;

            import org.apache.hadoop.io.WritableComparable;
            import org.apache.hadoop.io.WritableComparator;

            /**
             * Custom sort comparator: delegates to CombKey.compareTo (year ascending, temp descending).
             */
            public class CombKeyComparator extends WritableComparator{
                protected CombKeyComparator() {
                    super(CombKey.class, true);
                }

                public int compare(WritableComparable k1, WritableComparable k2) {
                    CombKey ck1 = (CombKey) k1;
                    CombKey ck2 = (CombKey) k2;
                    return ck1.compareTo(ck2) ;
                }
            }

3.5) Year grouping comparator

            package com.hadoop.mr.sort.secondary;

            import org.apache.hadoop.io.WritableComparable;
            import org.apache.hadoop.io.WritableComparator;

            /**
             * Year grouping comparator: all keys with the same year form one reduce group.
             */
            public class YearGroupComparator extends WritableComparator{
                protected YearGroupComparator() {
                    super(CombKey.class, true);
                }

                public int compare(WritableComparable k1, WritableComparable k2) {
                    CombKey ck1 = (CombKey) k1;
                    CombKey ck2 = (CombKey) k2;
                    return ck1.year - ck2.year ;
                }
            }

3.6) Reducer class

            package com.hadoop.mr.sort.secondary;

            import org.apache.hadoop.io.IntWritable;
            import org.apache.hadoop.io.NullWritable;
            import org.apache.hadoop.mapreduce.Reducer;

            import java.io.IOException;
            import java.util.Iterator;

            /**
             * MaxTempReducer: keys are grouped by year and sorted by temp descending,
             * so the first key of each group already carries that year's maximum temperature.
             */
            public class MaxTempReducer extends Reducer<CombKey, NullWritable, IntWritable, IntWritable>{
                protected void reduce(CombKey key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
                    int year = key.year ;
                    int temp = key.temp ;
                    context.write(new IntWritable(year),new IntWritable(temp));

                }
            }

3.7)App

            package com.hadoop.mr.sort.secondary;

            import org.apache.hadoop.conf.Configuration;
            import org.apache.hadoop.fs.FileSystem;
            import org.apache.hadoop.fs.Path;
            import org.apache.hadoop.io.IntWritable;
            import org.apache.hadoop.io.NullWritable;
            import org.apache.hadoop.mapreduce.Job;
            import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
            import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
            import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

            /**
             * App for the secondary sort job.
             */
            public class App {
                public static void main(String[] args) throws Exception {
                    args = new String[]{"d:/java/mr/data/temp.seq", "d:/java/mr/out"};
                    Configuration conf = new Configuration();
                    FileSystem fs = FileSystem.get(conf);
                    if(fs.exists(new Path(args[1]))){
                        fs.delete(new Path(args[1]),true);
                    }

                    Job job = Job.getInstance(conf);

                    job.setJobName("maxTemp");
                    job.setJarByClass(App.class);

                    job.setMapperClass(MaxTempMapper.class);
                    job.setReducerClass(MaxTempReducer.class);

                    FileInputFormat.addInputPath(job,new Path(args[0]));
                    FileOutputFormat.setOutputPath(job,new Path(args[1]));
                    //set the SequenceFile input format
                    job.setInputFormatClass(SequenceFileInputFormat.class);
                    //partition by year
                    job.setPartitionerClass(YearPartitioner.class);

                    job.setNumReduceTasks(3);

                    job.setMapOutputKeyClass(CombKey.class);
                    job.setMapOutputValueClass(NullWritable.class);

                    job.setOutputKeyClass(IntWritable.class);
                    job.setOutputValueClass(IntWritable.class);

                    job.setSortComparatorClass(CombKeyComparator.class);
                    job.setGroupingComparatorClass(YearGroupComparator.class);

                    job.waitForCompletion(true);
                }
            }
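
    With three reducers and partitioning by year % 3, the output is spread over part-r-00000 through part-r-00002; each line contains a year and that year's maximum temperature, because only the first (largest) temperature of each year group is written by the reducer.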