Workflow: cleaning web access logs with MapReduce and importing them into Hive

We will write a simple log-cleaning job. The raw access log looks like this:

192.168.18.1 - - [16/Feb/2017:13:53:49 +0800] "GET /favicon.ico HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/運動鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/運動鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/運動鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/運動鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/運動鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/運動鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/運動鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/運動鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/運動鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/運動鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/運動鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/運動鞋/a007 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/運動鞋/a003 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/運動鞋/a003 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/皮鞋/b001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/皮鞋/b002 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/皮鞋/b003 HTTP/1.1" 404 288

1. After preparing sample data in this format, upload the raw log file to the /user/hadoop/name directory on HDFS, for example as shown below;
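A minimal sketch of the upload, assuming the sample log has been saved locally as access.log (a file name chosen here only for illustration) and that the hadoop client is on the PATH:

    hadoop fs -mkdir -p /user/hadoop/name         # create the input directory on HDFS
    hadoop fs -put access.log /user/hadoop/name/  # upload the raw log file
    hadoop fs -ls /user/hadoop/name               # confirm the file is in place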

2. Create the Java program that performs the data cleaning:

    vim Namecount.java

import java.lang.String;
import java.io.IOException;
import java.util.*;
import java.text.SimpleDateFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.io.NullWritable;
 
public class Namecount {

        public static final SimpleDateFormat FORMAT = new SimpleDateFormat("d/MMM/yyyy:HH:mm:ss", Locale.ENGLISH); // original log timestamp format
        public static final SimpleDateFormat dateformat1 = new SimpleDateFormat("yyyy-MM-dd");                     // target date format
        private Date parseDateFormat(String string) {          // convert the timestamp string to a Date
            Date parse = null;
            try {
                parse = FORMAT.parse(string);
            } catch (Exception e) {
                e.printStackTrace();
            }
            return parse;
        }
        
        public String[] parse(String line) {
            String ip = parseIP(line);           // client ip
            String time = parseTime(line);       // access date
            String url = parseURL(line);         // request (url)
            String status = parseStatus(line);   // http status code
            String traffic = parseTraffic(line); // response size

            return new String[] { ip, time, url, status, traffic };
        }
        private String parseTraffic(String line) {  // response size
            final String trim = line.substring(line.lastIndexOf("\"") + 1)
                    .trim();
            String traffic = trim.split(" ")[1];
            return traffic;
        }
        private String parseStatus(String line) {   // http status code
            final String trim = line.substring(line.lastIndexOf("\"") + 1)
                    .trim();
            String status = trim.split(" ")[0];
            return status;
        }

        private String parseURL(String line) {      // request line between the quotes
            final int first = line.indexOf("\"");
            final int last = line.lastIndexOf("\"");
            String url = line.substring(first + 1, last);
            return url;
        }
        private String parseTime(String line) {     // access time, reformatted to yyyy-MM-dd
            final int first = line.indexOf("[");
            final int last = line.indexOf("+0800]");
            String time = line.substring(first + 1, last).trim();
            Date date = parseDateFormat(time);
            return dateformat1.format(date);
        }
        private String parseIP(String line) {       // client ip address
            String ip = line.split("- -")[0].trim();
            return ip;
        }
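    // Example (added for illustration): for a sample line such as
    //   192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/運動鞋/a001 HTTP/1.1" 404 288
    // parse() returns
    //   { "192.168.18.2", "2017-02-16", "GET /鞋子/男鞋/運動鞋/a001 HTTP/1.1", "404", "288" }
    // and the Mapper below strips the "GET /" prefix and " HTTP/1.1" suffix before splitting the path.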
    public static class Map extends
            Mapper<LongWritable, Text, Text, IntWritable> {

        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // convert the input text record to a String
            Text outputValue = new Text();
            String line = value.toString();
            Namecount aa = new Namecount();
            StringTokenizer tokenizerArticle = new StringTokenizer(line, "\n");

            // process each line
            while (tokenizerArticle.hasMoreElements()) {
                // parse the line into ip / date / url / status / traffic
                String stra = tokenizerArticle.nextToken().toString();
                String[] Newstr = aa.parse(stra);

                if (Newstr[2].startsWith("GET /")) {       // strip the leading request method
                    Newstr[2] = Newstr[2].substring("GET /".length());
                } else if (Newstr[2].startsWith("POST /")) {
                    Newstr[2] = Newstr[2].substring("POST /".length());
                }
                if (Newstr[2].endsWith(" HTTP/1.1")) {     // strip the trailing protocol version
                    Newstr[2] = Newstr[2].substring(0, Newstr[2].length()
                            - " HTTP/1.1".length());
                }
                String[] words = Newstr[2].split("/");
                if (words.length == 4) {
                    // emit key: ip, date, category, gender, type, product id, weight 0; value: count 1
                    outputValue.set(Newstr[0] + "\t" + Newstr[1] + "\t" + words[0] + "\t" + words[1] + "\t" + words[2] + "\t" + words[3] + "\t" + "0");
                    context.write(outputValue, new IntWritable(1));
                }
            }
        }
    }
 
    public static class Reduce extends
            Reducer<Text, IntWritable, Text, IntWritable> {
        // the reduce function sums the occurrence counts for each key
        public void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            int sum = 0;
            Iterator<IntWritable> iterator = values.iterator();
            while (iterator.hasNext()) {
                sum += iterator.next().get();
            }
            context.write(key, new IntWritable(sum));
        }
    }
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        conf.set("mapred.jar", "Namecount.jar");

        String[] ioArgs = new String[] { "name", "name_out" };
        String[] otherArgs = new GenericOptionsParser(conf, ioArgs).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: Namecount <in> <out>");
            System.exit(2);
        }

        Job job = new Job(conf, "name_goods_count");
        job.setJarByClass(Namecount.class);

        // set the Mapper, Combiner and Reducer classes
        job.setMapperClass(Map.class);
        job.setCombinerClass(Reduce.class);
        job.setReducerClass(Reduce.class);

        // set the output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // the input format splits the data set and provides a RecordReader implementation
        job.setInputFormatClass(TextInputFormat.class);
        // the output format provides a RecordWriter implementation for writing the results
        job.setOutputFormatClass(TextOutputFormat.class);

        // set the input and output directories
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}


3. Compile and run:

[hadoop@h85 mr]$ /usr/jdk1.7.0_25/bin/javac Namecount.java
[hadoop@h85 mr]$ /usr/jdk1.7.0_25/bin/jar cvf Namecount.jar Namecount*class
[hadoop@h85 mr]$ hadoop jar Namecount.jar Namecount

The cleaned result is saved to /user/hadoop/name_out/part-r-00000 and can be inspected as shown below.
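For example, a quick look at the first few cleaned rows (each row holds ip, date, category, gender, shoe type, product id, weight and count, separated by tabs):

    hadoop fs -cat /user/hadoop/name_out/part-r-00000 | head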

4. Create a table in Hive with the corresponding fields:

Columns:     ip string    acc_date string    wp string (category)    sex string (men's/women's)    type string (shoe type)    nid string (product id)    quanzhong int (weight)    count int
Example row: 192.168.18.2    2017-02-16    鞋子    男鞋    運動鞋    a001    0    13

Create the table:

  create table acc_log(ip string,acc_date string,wp string,sex string,type string,nid string,quanzhong int,count int) row format delimited fields terminated by '\t';

Load the data:

  load data inpath '/user/hadoop/name_out/part-r-00000' into table acc_log;
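As a quick sanity check after the load, a sketch of two follow-up queries run from the shell with the hive CLI (hive -e executes a query string; backticks quote the count column because count is also a built-in function name):

  hive -e 'select * from acc_log limit 5;'
  hive -e 'select nid, sum(`count`) as visits from acc_log group by nid;'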
