DBInputFormat in MapReduce

To have MapReduce read data directly from a relational database, the JDBC driver jar must be on Hadoop's classpath. Running hadoop classpath prints the directories Hadoop searches; copying the jar into any one of them is enough.
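
The job below expects a MySQL database named test with a table myuser holding an integer id and a string name (the column types are assumptions inferred from the MyUser class further down). A minimal JDBC sketch that prepares such a table, reusing the job's connection settings; the sample rows are placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class MyUserTableSetup {
	public static void main(String[] args) throws Exception {
		// Load the driver and connect with the same settings that
		// DBConfiguration.configureDB uses below.
		Class.forName("com.mysql.jdbc.Driver");
		Connection conn = DriverManager.getConnection(
				"jdbc:mysql://localhost:3306/test", "root", "hadoop");
		Statement st = conn.createStatement();
		// Assumed schema: the columns match the fields of the MyUser class.
		st.executeUpdate("CREATE TABLE IF NOT EXISTS myuser ("
				+ "id INT PRIMARY KEY, name VARCHAR(64))");
		st.executeUpdate("INSERT INTO myuser VALUES (1, 'hadoop')");
		st.executeUpdate("INSERT INTO myuser VALUES (2, 'mapreduce')");
		st.close();
		conn.close();
	}
}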

package com.bigdata.hadoop.mapred;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.net.URI;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;
import org.apache.hadoop.mapreduce.lib.db.DBInputFormat;
import org.apache.hadoop.mapreduce.lib.db.DBWritable;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class DBInputFormatApp {

	private static final String OUTPUT_PATH = "hdfs://hadoop1:9000/out";

	public static void main(String[] args) throws Exception {
		Configuration configuration = new Configuration();
		// Configure the JDBC connection before constructing the Job: the Job
		// constructor copies the Configuration, so later changes to
		// "configuration" would not reach the job.
		DBConfiguration.configureDB(configuration, "com.mysql.jdbc.Driver",
				"jdbc:mysql://localhost:3306/test", "root", "hadoop");
		Job job = new Job(configuration, DBInputFormatApp.class.getSimpleName());
				
		// Delete the output directory if it already exists so the job can be rerun.
		final FileSystem fileSystem = FileSystem.get(new URI(OUTPUT_PATH),
				configuration);
		fileSystem.delete(new Path(OUTPUT_PATH), true);

		job.setJarByClass(DBInputFormatApp.class);
		
		job.setInputFormatClass(DBInputFormat.class);
		// setInput(Job job, Class<? extends DBWritable> inputClass, String tableName,
		//          String conditions, String orderBy, String... fieldNames)
		DBInputFormat.setInput(job, MyUser.class, "myuser", null, null, "id", "name");
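		// With these arguments DBInputFormat effectively issues
		//   SELECT id, name FROM myuser
		// and partitions the rows across map tasks with LIMIT/OFFSET clauses.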

		// When the map output types match the reduce output types, these can be omitted.
		job.setMapperClass(MyMapper.class);
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(LongWritable.class);
		
		// Run map-only: skip the reduce phase and write map output straight to HDFS.
		job.setNumReduceTasks(0);

		FileOutputFormat.setOutputPath(job, new Path(OUTPUT_PATH));
		
		// Ship the MySQL driver jar (assumed to already sit at this HDFS path)
		// to each task's classpath. Use the job's own Configuration, since the
		// Job constructor took a copy of "configuration".
		DistributedCache.addFileToClassPath(
				new Path("/usr/local/mysql-connector-java-5.1.10.jar"),
				job.getConfiguration(), fileSystem);
		
		job.waitForCompletion(true);
	}

	// DBInputFormat hands the mapper one MyUser per table row; the key is the
	// record's position in the result set.
	public static class MyMapper extends
			Mapper<LongWritable, MyUser, Text, LongWritable> {
		final Text k2 = new Text();
		final LongWritable v2 = new LongWritable(1);

		@Override
		protected void map(LongWritable key, MyUser value,
				Mapper<LongWritable, MyUser, Text, LongWritable>.Context context)
				throws IOException, InterruptedException {
			// Emit one "id:name" line per row, with a count of 1.
			k2.set(value.toString());
			context.write(k2, v2);
		}
	}
	
	// MyUser maps one row of the myuser table: DBWritable lets DBInputFormat
	// populate it from a JDBC ResultSet (and write it back through a
	// PreparedStatement); Writable lets Hadoop serialize it between tasks.
	public static class MyUser implements Writable, DBWritable {
		int id;
		String name;
		
		@Override
		public void readFields(DataInput in) throws IOException {
			this.id = in.readInt();
			this.name = Text.readString(in);
		}
		@Override
		public void write(DataOutput out) throws IOException {
			out.writeInt(id);
			Text.writeString(out, name);
		}
		
		@Override
		public void readFields(ResultSet resultSet) throws SQLException {
			this.id = resultSet.getInt(1);
			this.name = resultSet.getString(2);
		}
		
		@Override
		public void write(PreparedStatement statement) throws SQLException {
			statement.setInt(1, id);
			statement.setString(2, name);
		}
		
		@Override
		public String toString() {
			return this.id + ":" + this.name;
		}
	}
}
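
Besides the table-name form of setInput used above, DBInputFormat also provides an overload that takes a free-form SQL query plus a count query, which it uses to split the rows among the map tasks. A minimal sketch of that variant, reusing the MyUser class; the two query strings are illustrative:

// Alternative: drive the input with explicit SQL instead of a table name.
// The second query must return the total row count.
DBInputFormat.setInput(job, MyUser.class,
		"SELECT id, name FROM myuser",
		"SELECT COUNT(*) FROM myuser");

Since the job runs with zero reduce tasks, each map task writes its records directly to a part-m-xxxxx file under /out, one line per database row with the record text and the count 1 separated by a tab.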

