Quick Start with MapReduce ③: A Comprehensive MapReduce Exercise in Internet Traffic Statistics

Contents

      Requirement
      1. Create a Maven project and add the following to pom.xml
      2. Define FlowBean, a custom map output value class
      3. Define the Mapper class
      4. Define the Reducer class
      5. Define the driver class
      6. Input file and results


Requirement:

For each phone number, compute the sum of its upstream flow, the sum of its downstream flow, the sum of its total upstream flow, and the sum of its total downstream flow.

Analysis: use the phone number as the key and the four fields (upstream flow, downstream flow, total upstream flow, total downstream flow) as the value; these key/value pairs serve as the output of the map phase and the input of the reduce phase.
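As a concrete illustration, take the sample record quoted in the Mapper below (tab-separated; the phone-number key sits at field index 1 and the four flow fields at indexes 6 to 9):

input line : 136315798****	13726230	00-FD-07-A4-72-B8:CMCC	120.196.100.82	i02.c.aliimg.com	遊戲娛樂	24	27	2481	24681	200
map output : key = "13726230" (field 1), value = FlowBean(upFlow=24, downFlow=27, upCountFlow=2481, downCountFlow=24681)
reduce     : for each key, sums the four fields across all of its FlowBean values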

1. Create a Maven project and add the following to pom.xml

    <repositories>
        <repository>
            <id>cloudera</id>
            <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
        </repository>
    </repositories>

    <dependencies>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>1.18.10</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.6.0-mr1-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.6.0-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.6.0-cdh5.14.0</version>
        </dependency>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>2.6.0-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.11</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.testng</groupId>
            <artifactId>testng</artifactId>
            <!-- pin a concrete version; the RELEASE meta-version is not supported by Maven 3 -->
            <version>6.14.3</version>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.0</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <encoding>UTF-8</encoding>
                </configuration>
            </plugin>

            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>2.4.3</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <minimizeJar>true</minimizeJar>
                        </configuration>
                    </execution>
                </executions>
            </plugin>

        </plugins>
    </build>
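With this pom in place, the project can be packaged into a runnable jar (the shade plugin is bound to the package phase). A minimal sketch of the build-and-run commands; the exact jar name depends on your project's artifactId, so <your-artifact> below is a placeholder:

mvn clean package
hadoop jar target/<your-artifact>.jar com.czxy.flow.FlowDriver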

2. Define FlowBean, a custom map output value class

package com.czxy.flow;

import lombok.Data;
import lombok.NoArgsConstructor;
import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

@Data              // Lombok: getters/setters, equals/hashCode, toString
@NoArgsConstructor // Hadoop needs a no-arg constructor for deserialization
public class FlowBean implements Writable {
    private Integer upFlow;        // upstream flow
    private Integer downFlow;      // downstream flow
    private Integer upCountFlow;   // total upstream flow
    private Integer downCountFlow; // total downstream flow

    // serialize; the field order must match readFields()
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(upFlow);
        out.writeInt(downFlow);
        out.writeInt(upCountFlow);
        out.writeInt(downCountFlow);
    }

    // deserialize in the same field order as write()
    @Override
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readInt();
        this.downFlow = in.readInt();
        this.upCountFlow = in.readInt();
        this.downCountFlow = in.readInt();
    }
}
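One point worth knowing: TextOutputFormat renders the reduce-side value with toString(), and Lombok's @Data generates a form like FlowBean(upFlow=24, ...). If plain tab-separated columns are preferred, a custom toString can be declared inside FlowBean (an optional override, not part of the original class; Lombok then skips generating its own):

    // optional: tab-separated rendering for TextOutputFormat
    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + upCountFlow + "\t" + downCountFlow;
    }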

3. Define the Mapper class

package com.czxy.flow;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    // reuse one FlowBean instance; context.write() serializes it immediately,
    // so it is safe to refill it for every record
    FlowBean flowBean = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // sample input line:
        // 136315798****	13726230	00-FD-07-A4-72-B8:CMCC	120.196.100.82	i02.c.aliimg.com	遊戲娛樂	24	27	2481	24681	200
        // convert the Text value to a plain String
        String s = value.toString();
        // split the line on tabs
        String[] split = s.split("\t");
        // populate the bean from fields 6-9
        flowBean.setUpFlow(Integer.parseInt(split[6]));
        flowBean.setDownFlow(Integer.parseInt(split[7]));
        flowBean.setUpCountFlow(Integer.parseInt(split[8]));
        flowBean.setDownCountFlow(Integer.parseInt(split[9]));
        // emit: key = phone number (field 1), value = the populated FlowBean
        context.write(new Text(split[1]), flowBean);
    }
}
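Real log files often contain blank or malformed lines, and Integer.parseInt would then throw and fail the whole task. A minimal guard at the top of map(), assuming at least ten tab-separated fields per valid record (this check is an addition, not part of the original code):

        // skip records that do not have the expected number of fields
        if (split.length < 10) {
            return;
        }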

4. Define the Reducer class

package com.czxy.flow;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowReduce extends Reducer<Text, FlowBean, Text, FlowBean> {
    private FlowBean flowBean = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        // accumulators for the four flow fields
        int upFlow = 0;
        int downFlow = 0;
        int upCountFlow = 0;
        int downCountFlow = 0;
        // sum every FlowBean that shares this phone number
        for (FlowBean value : values) {
            upFlow += value.getUpFlow();
            downFlow += value.getDownFlow();
            upCountFlow += value.getUpCountFlow();
            downCountFlow += value.getDownCountFlow();
        }
        // store the totals in the reusable bean
        flowBean.setUpFlow(upFlow);
        flowBean.setDownFlow(downFlow);
        flowBean.setUpCountFlow(upCountFlow);
        flowBean.setDownCountFlow(downCountFlow);
        // emit: key = phone number, value = aggregated FlowBean
        context.write(key, flowBean);
    }
}
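Since the aggregation here is a plain sum (associative and commutative) and the reducer's input and output types are both (Text, FlowBean), the same class can double as a combiner to cut shuffle traffic. One optional line in the driver below would enable it:

        job.setCombinerClass(FlowReduce.class);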

5. Define the driver class

package com.czxy.flow;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class FlowDriver extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        // get a job instance (getConf() picks up the configuration that
        // ToolRunner set on this Tool)
        Job job = Job.getInstance(getConf());
        // make the job runnable from a jar
        job.setJarByClass(FlowDriver.class);
        // set the mapper
        job.setMapperClass(FlowMapper.class);
        // set the map output key type
        job.setMapOutputKeyClass(Text.class);
        // set the map output value type
        job.setMapOutputValueClass(FlowBean.class);
        // set the reducer
        job.setReducerClass(FlowReduce.class);
        // set the reduce output key type
        job.setOutputKeyClass(Text.class);
        // set the reduce output value type
        job.setOutputValueClass(FlowBean.class);
        // set the input format and path
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("./data/flow/"));
        // set the output format and path (the directory must not already exist)
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path("./outPut/flow/"));
        // submit the job and wait for it to finish
        boolean b = job.waitForCompletion(true);
        return b ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new FlowDriver(), args);
        System.exit(exitCode);
    }
}
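Note that FileOutputFormat refuses to run if the output directory already exists, so ./outPut/flow/ must be deleted between runs. A small convenience sketch that could be placed at the start of run() (an addition, not in the original driver; it needs an extra import of org.apache.hadoop.fs.FileSystem, while Path is already imported):

        // delete a stale output directory before submitting the job
        FileSystem fs = FileSystem.get(getConf());
        Path outPath = new Path("./outPut/flow/");
        if (fs.exists(outPath)) {
            fs.delete(outPath, true);
        }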

6. Input file and results

          Download the sample input file (extraction code: 0t53)

Execution result: part-r-00000

 
