Sqoop Source Code Analysis, Part 1: Submitting Jobs from Windows to a Hadoop Cluster with ToolRunner

This Sqoop source-code series aims to walk through the Sqoop source in a detailed yet accessible way. It has three parts: first, using ToolRunner to submit a MapReduce job from a local Windows machine to the Hadoop cluster; second, an analysis of how Sqoop goes from reading its configuration to submitting a job; third, how Sqoop's map tasks split table data and import it into the target table.

Overview: Sqoop submits jobs to the Hadoop cluster through generated MapReduce code. What does that submission process actually look like? Here we reproduce it with Hadoop's classic WordCount example; later articles in the series will analyze the process in detail.

1. WordCount Implementation

package master.hadooptool;

import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

public class WordCount {

	// Mapper: splits each input line into tokens and emits (word, 1) for every token
	public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {

		private final IntWritable one = new IntWritable(1);
		private Text word = new Text();

		@Override
		public void map(Object key, Text value, Mapper<Object, Text, Text, IntWritable>.Context context)
				throws IOException, InterruptedException {
			StringTokenizer itr = new StringTokenizer(value.toString());
			while (itr.hasMoreTokens()) {
				this.word.set(itr.nextToken());
				context.write(this.word, one);
			}
		}
	}

	// Reducer (also used as the combiner): sums the counts emitted for each word
	public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

		private IntWritable result = new IntWritable();

		@Override
		public void reduce(Text key, Iterable<IntWritable> values,
				Reducer<Text, IntWritable, Text, IntWritable>.Context context)
				throws IOException, InterruptedException {
			int sum = 0;
			for (IntWritable val : values) {
				sum += val.get();
			}
			this.result.set(sum);
			context.write(key, this.result);
		}
	}
}

Once WordCount is written, package it into wordcount.jar and keep it on the local machine (the code below expects it at E:/hadoop/wordcount.jar).
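
If the project is built with Maven as in section 3, one minimal way to get a jar with that exact name (a sketch, not part of the original post) is to add a finalName to pom.xml, run mvn package, and copy target/wordcount.jar to E:/hadoop/wordcount.jar:

<!-- Hypothetical addition to pom.xml: makes "mvn package" produce target/wordcount.jar -->
<build>
	<finalName>wordcount</finalName>
</build>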

2. Implementing the Runner Tool
SqoopTool extends Configured and implements the Tool interface, so our runner class needs to do the same. Once this class is implemented, ToolRunner uploads the local jar to HDFS when the job is submitted; we do not have to handle that step ourselves.

package master.hadooptool;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.PropertyConfigurator;
public class WordCountTool extends Configured implements Tool {

	public static void main(String[] args) throws Exception {

		PropertyConfigurator.configure("log4j.properties");
		System.setProperty("HADOOP_USER_NAME", "hadoop");

		// Load the Hadoop configuration files into a Configuration object
		String HADOOP_CONFS[] = { "core-site.xml", "hdfs-site.xml", "mapred-site.xml", "yarn-site.xml",
				"hive2-site.xml", "hbase-site.xml" };
		Configuration conf = new Configuration();
		for (String name : HADOOP_CONFS) {
			File file = new File(name);
			if (!file.exists()) {
				continue;
			}
			FileInputStream in = null;
			try {
				in = new FileInputStream(file);
			} catch (FileNotFoundException e) {
				e.printStackTrace();
				continue; // skip this file if it cannot be opened
			}
			conf.addResource(in);
		}

		WordCountTool tool = new WordCountTool();
		tool.setConf(conf);

		// Hand the configured tool to ToolRunner, which will call run(String[])
		ToolRunner.run(tool.getConf(), tool, new String[] {});
	}


	public int run(String[] args) throws Exception {

		Configuration conf = super.getConf();

		conf.setBoolean("mapreduce.app-submission.cross-platform", true); // enable cross-platform submission (Windows client, Linux cluster)
		conf.set("tmpjars", "file:/E:/hadoop/wordcount.jar"); // ship the local wordcount.jar with the job; note the path format

		// job.getConfiguration().set("mapred.jar", "E:/hadoop/wordcount.jar");
		// mapred.jar points to the jar that contains the MapReduce classes, while tmpjars
		// lists dependency jars. Since we have no extra dependencies, setting either one
		// is enough; again, note the path format.

		// Build the job
		Job job = Job.getInstance(conf, "wordcount");
		job.setJarByClass(WordCountTool.class);
		job.setMapperClass(WordCount.TokenizerMapper.class);
		job.setCombinerClass(WordCount.IntSumReducer.class);
		job.setReducerClass(WordCount.IntSumReducer.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);

		// Input and output paths: a plain-text file must already exist in HDFS at the input path
		FileInputFormat.addInputPath(job, new Path("/wordcount/wordcount.txt"));
		FileOutputFormat.setOutputPath(job, new Path("/wordcount/result"));

		int n = job.waitForCompletion(true) ? 0 : -1;

		return n;
	}

}
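
To see why implementing Tool is all that is needed, it helps to know what ToolRunner.run does. Roughly (a simplified sketch, not the real Hadoop source; the class name ToolRunnerSketch is just for illustration), it parses the generic options, installs the resulting Configuration on the Tool, and then calls the Tool's run method with whatever arguments remain:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;

// Simplified sketch of org.apache.hadoop.util.ToolRunner.run(conf, tool, args)
public class ToolRunnerSketch {

	public static int run(Configuration conf, Tool tool, String[] args) throws Exception {
		if (conf == null) {
			conf = new Configuration();
		}
		// GenericOptionsParser consumes generic flags such as -D, -files and -libjars
		GenericOptionsParser parser = new GenericOptionsParser(conf, args);
		// The parsed Configuration is installed on the Tool...
		tool.setConf(conf);
		// ...and the remaining, tool-specific arguments go to the Tool's own run(String[])
		return tool.run(parser.getRemainingArgs());
	}
}

This is also why Sqoop only has to provide a Tool implementation: the generic-option handling and configuration wiring come for free.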

3. Dependencies and Configuration Files
Maven dependencies declared in pom.xml:

<dependencies>
		<dependency>
			<groupId>junit</groupId>
			<artifactId>junit</artifactId>
			<version>3.8.1</version>
			<scope>test</scope>
		</dependency>
		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-common</artifactId>
			<version>2.8.1</version>
		</dependency>
		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-mapreduce-client-common</artifactId>
			<version>2.8.1</version>
		</dependency>
		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-mapreduce-client-jobclient</artifactId>
			<version>2.8.1</version>
			<scope>provided</scope>
		</dependency>
		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-hdfs-client</artifactId>
			<version>2.8.1</version>
			<scope>provided</scope>
		</dependency>
	</dependencies>

core-site.xml

<configuration>
	<property>
		<name>fs.defaultFS</name>
		<value>hdfs://hadoopmaster:9000</value>
	</property>
	<property>
		<name>fs.hdfs.impl</name>
		<value>org.apache.hadoop.hdfs.DistributedFileSystem</value>
		<description>The FileSystem for hdfs: uris.</description>
	</property>
	<property>
		<name>hadoop.tmp.dir</name>
		<value>/soft/hadoop_data/tmp</value>
	</property>
	<property>
		<name>hadoop.proxyuser.root.hosts</name>
		<value>*</value>
	</property>
	<property>
		<name>hadoop.proxyuser.root.groups</name>
		<value>*</value>
	</property>
	<property>	    
		<name>hadoop.proxyuser.hadoop.hosts</name>
		<value>*</value>
	</property>
	<property>
		<name>hadoop.proxyuser.hadoop.groups</name>
		<value>*</value>
	</property>
</configuration>

mapred-site.xml

<configuration> 
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
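
As an aside (an optional, hypothetical addition, not part of the original cluster files): the cross-platform flag that WordCountTool sets in code can equally be placed in the client-side mapred-site.xml:

<property>
	<name>mapreduce.app-submission.cross-platform</name>
	<value>true</value>
</property>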

yarn-site.xml

<configuration>
 <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoopmaster</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
    <description>Whether virtual memory limits will be enforced for containers</description>
  </property>
  <property>
    <name>yarn.nodemanager.vmem-pmem-ratio</name>
    <value>4</value>
    <description>Ratio between virtual memory to physical memory when setting memory limits for containers</description>
  </property>
</configuration>

In the configuration files above, replace hadoopmaster with the IP address (or resolvable hostname) of your own Hadoop cluster.

4. Run Results
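After the job completes successfully, the word counts are written to /wordcount/result in HDFS. To check them from the same Windows client, a small hypothetical helper class (not from the original post; it assumes the default single reducer, whose output file is part-r-00000) could look like this:

package master.hadooptool;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ResultPrinter {

	public static void main(String[] args) throws Exception {
		System.setProperty("HADOOP_USER_NAME", "hadoop");

		// Point the client at the cluster using the same core-site.xml as WordCountTool
		Configuration conf = new Configuration();
		conf.addResource(new Path("core-site.xml"));

		FileSystem fs = FileSystem.get(conf);
		Path result = new Path("/wordcount/result/part-r-00000"); // default single-reducer output

		try (BufferedReader reader = new BufferedReader(
				new InputStreamReader(fs.open(result), StandardCharsets.UTF_8))) {
			String line;
			while ((line = reader.readLine()) != null) {
				System.out.println(line); // each line is "word<TAB>count"
			}
		}
	}
}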

5. Summary
Hadoop's ToolRunner lets us submit MapReduce jobs to a remote Hadoop cluster. Once tmpjars (or mapred.jar) is configured on the Job, the jar is uploaded to HDFS at submission time and the cluster then runs the task.
In practice you will still run into plenty of problems; analyze them calmly, work through them, and keep learning and improving.
Finally, if you have any questions, feel free to leave me a message...
