1. pom.xml
● Create a Maven project, complete the directory structure (a sample layout is sketched right after the pom below), and configure pom.xml:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>cn.itcast</groupId>
<artifactId>SparkDemo</artifactId>
<version>1.0-SNAPSHOT</version>
<!-- Repository locations: the aliyun, cloudera, and jboss repositories, in that order -->
<repositories>
<repository>
<id>aliyun</id>
<url>http://maven.aliyun.com/nexus/content/groups/public/</url>
</repository>
<repository>
<id>cloudera</id>
<url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
</repository>
<repository>
<id>jboss</id>
<url>http://repository.jboss.com/nexus/content/groups/public</url>
</repository>
</repositories>
<properties>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
<encoding>UTF-8</encoding>
<scala.version>2.11.8</scala.version>
<scala.compat.version>2.11</scala.compat.version>
<hadoop.version>2.7.4</hadoop.version>
<spark.version>2.2.0</spark.version>
</properties>
<dependencies>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
<version>${scala.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-hive_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-hive-thriftserver_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<!-- <dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming-kafka-0-8_2.11</artifactId>
<version>${spark.version}</version>
</dependency>-->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql-kafka-0-10_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<!--<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>2.6.0-mr1-cdh5.14.0</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>1.2.0-cdh5.14.0</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<version>1.2.0-cdh5.14.0</version>
</dependency>-->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>2.7.4</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>1.3.1</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<version>1.3.1</version>
</dependency>
<dependency>
<groupId>com.typesafe</groupId>
<artifactId>config</artifactId>
<version>1.3.3</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.38</version>
</dependency>
</dependencies>
<build>
<sourceDirectory>src/main/scala</sourceDirectory>
<testSourceDirectory>src/test/scala</testSourceDirectory>
<plugins>
<!-- Plugin for compiling Java sources -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.5.1</version>
</plugin>
<!-- Plugin for compiling Scala sources -->
<plugin>
<groupId>net.alchim31.maven</groupId>
<artifactId>scala-maven-plugin</artifactId>
<version>3.2.2</version>
<executions>
<execution>
<goals>
<goal>compile</goal>
<goal>testCompile</goal>
</goals>
<configuration>
<args>
<arg>-dependencyfile</arg>
<arg>${project.build.directory}/.scala_dependencies</arg>
</args>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.18.1</version>
<configuration>
<useFile>false</useFile>
<disableXmlReport>true</disableXmlReport>
<includes>
<include>**/*Test.*</include>
<include>**/*Suite.*</include>
</includes>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>2.3</version>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<filters>
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>META-INF/*.SF</exclude>
<exclude>META-INF/*.DSA</exclude>
<exclude>META-INF/*.RSA</exclude>
</excludes>
</filter>
</filters>
<transformers>
<transformer
implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
<mainClass></mainClass>
</transformer>
</transformers>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
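For reference, a minimal layout matching the sourceDirectory/testSourceDirectory settings above could look like the sketch below; the package and file name are taken from the WordCount example later in this post, and the rest is just the standard Maven convention:
SparkDemo/
  pom.xml
  src/main/scala/cn/itcast/sparkhello/WordCount.scala
  src/test/scala/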
● The difference between maven-assembly-plugin and maven-shade-plugin (both can build a fat jar, but shade can additionally relocate classes and filter/merge resources):
https://blog.csdn.net/lisheng19870305/article/details/88300951
2. Running locally
package cn.itcast.sparkhello
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
object WordCount {
def main(args: Array[String]): Unit = {
//1. Create the SparkContext
val config = new SparkConf().setAppName("wc").setMaster("local[*]")
val sc = new SparkContext(config)
sc.setLogLevel("WARN")
//2. Read the file
//A Resilient Distributed Dataset (RDD) can simply be thought of as a distributed collection,
//but Spark wraps a lot of machinery around it so that programmers can use it
//as easily as a local collection, which keeps everyone happy
val fileRDD: RDD[String] = sc.textFile("D:\\授課\\190429\\資料\\data\\words.txt")
//3. Process the data
//3.1 Split each line on spaces and flatten the results into a collection of individual words
//flatMap applies the function to every element and then flattens the result
val wordRDD: RDD[String] = fileRDD.flatMap(_.split(" "))
//3.2 Map each word to a count of 1
val wordAndOneRDD: RDD[(String, Int)] = wordRDD.map((_,1))
//3.3 Aggregate by key to count the occurrences of each word
//wordAndOneRDD.reduceByKey((a,b)=>a+b)
//first _: the result accumulated so far
//second _: the incoming value
val wordAndCount: RDD[(String, Int)] = wordAndOneRDD.reduceByKey(_+_)
//4. Collect the results
val result: Array[(String, Int)] = wordAndCount.collect()
result.foreach(println)
}
}
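As a side note, the reduceByKey comment above ("accumulated result" vs "incoming value") can be made explicit by naming the parameters instead of using the _ placeholders. Below is a minimal variant sketch (not part of the original code, and it assumes the same local words.txt path) that also sorts the counts in descending order before printing:
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
object WordCountSorted {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("wcSorted").setMaster("local[*]"))
    sc.setLogLevel("WARN")
    val fileRDD: RDD[String] = sc.textFile("D:\\授課\\190429\\資料\\data\\words.txt")
    val wordAndCount: RDD[(String, Int)] = fileRDD
      .flatMap(_.split(" "))
      .map((_, 1))
      //accumulated is the running sum for this key, incoming is the next value
      .reduceByKey((accumulated, incoming) => accumulated + incoming)
    //the same chain reads almost identically on a local collection, e.g.
    //List("a b", "a").flatMap(_.split(" ")).groupBy(identity).mapValues(_.size)
    //sort by the count (second tuple element), largest first, then print
    wordAndCount.sortBy(_._2, ascending = false).collect().foreach(println)
    sc.stop()
  }
}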
3. Running on a cluster
● Modify the code as follows (the master is no longer hard-coded and the input/output paths come from the command-line arguments):
package cn.itcast.sparkhello
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
object WordCount {
def main(args: Array[String]): Unit = {
//1. Create the SparkContext
val config = new SparkConf().setAppName("wc")//.setMaster("local[*]")
val sc = new SparkContext(config)
sc.setLogLevel("WARN")
//2. Read the file
//A Resilient Distributed Dataset (RDD) can simply be thought of as a distributed collection,
//but Spark wraps a lot of machinery around it so that programmers can use it
//as easily as a local collection, which keeps everyone happy
val fileRDD: RDD[String] = sc.textFile(args(0)) //input file path
//3. Process the data
//3.1 Split each line on spaces and flatten the results into a collection of individual words
//flatMap applies the function to every element and then flattens the result
val wordRDD: RDD[String] = fileRDD.flatMap(_.split(" "))
//3.2 Map each word to a count of 1
val wordAndOneRDD: RDD[(String, Int)] = wordRDD.map((_,1))
//3.3 Aggregate by key to count the occurrences of each word
//wordAndOneRDD.reduceByKey((a,b)=>a+b)
//first _: the result accumulated so far
//second _: the incoming value
val wordAndCount: RDD[(String, Int)] = wordAndOneRDD.reduceByKey(_+_)
wordAndCount.saveAsTextFile(args(1))//output file path
//4. Collect the results (not needed here; the output is written to HDFS instead)
//val result: Array[(String, Int)] = wordAndCount.collect()
//result.foreach(println)
}
}
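Because the cluster version reads args(0) and args(1) blindly, a small sketch of guarding the arguments is shown below (this object and its usage message are assumptions, not part of the original code). Also note that saveAsTextFile fails if the output directory already exists on HDFS, so the output path passed to spark-submit must be a new one:
import org.apache.spark.{SparkConf, SparkContext}
object WordCountWithCheck {
  def main(args: Array[String]): Unit = {
    //guard the positional arguments before touching Spark at all
    if (args.length < 2) {
      System.err.println("Usage: WordCount <inputPath> <outputPath>")
      sys.exit(1)
    }
    val sc = new SparkContext(new SparkConf().setAppName("wc"))
    sc.setLogLevel("WARN")
    sc.textFile(args(0))
      .flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      //args(1) must not already exist on HDFS, otherwise this call throws
      .saveAsTextFile(args(1))
    sc.stop()
  }
}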
4. Package and upload
● Submit to the Spark HA cluster:
/export/servers/spark-2.2.0-bin-2.6.0-cdh5.14.0/bin/spark-submit \
--class cn.itcast.sparkhello.WordCount \
--master spark://node01:7077,node02:7077 \
--executor-memory 1g \
--total-executor-cores 2 \
/root/wc.jar \
hdfs://node01:8020/aa.txt \
hdfs://node01:8020/cc
● Submit to the YARN cluster:
/export/servers/spark-2.2.0-bin-2.6.0-cdh5.14.0/bin/spark-submit \
--class cn.itcast.sparkhello.WordCount \
--master yarn \
--deploy-mode cluster \
--driver-memory 1g \
--executor-memory 1g \
--executor-cores 2 \
--queue default \
/root/wc.jar \
hdfs://node01:8020/wordcount/input/words.txt \
hdfs://node01:8020/wordcount/output5
Java 8 version [for reference]
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;
import java.util.Arrays;
public class WordCount_Java {
public static void main(String[] args){
SparkConf conf = new SparkConf().setAppName("wc").setMaster("local[*]");
JavaSparkContext jsc = new JavaSparkContext(conf);
JavaRDD<String> fileRDD = jsc.textFile("D:\\授課\\190429\\資料\\data\\words.txt");
JavaRDD<String> wordRDD = fileRDD.flatMap(s -> Arrays.asList(s.split(" ")).iterator());
JavaPairRDD<String, Integer> wordAndOne = wordRDD.mapToPair(w -> new Tuple2<>(w, 1));
JavaPairRDD<String, Integer> wordAndCount = wordAndOne.reduceByKey((a, b) -> a + b);
//wordAndCount.collect().forEach(t->System.out.println(t));
wordAndCount.collect().forEach(System.out::println);
//The core idea of functional programming: parameterizing behavior!
}
}
Folks, next time I'll cover Spark Core!