基礎環境搭建
- 基於前面的文章,hadoop已經搭建好,下面我們將進行spark on yarn搭建
- 下載並配置scala,我們選擇scala-2.12.8這個版本即可,下載解壓scala即可
配置環境
# Scala installation root ("default" points at the unpacked scala-2.12.8 directory)
export SCALA_HOME=/opt/bigdata/scala/default
spark配置
spark下載
tar -zxvf spark-2.4.3-bin-hadoop2.7.tgz -C ./
spark配置文件
- spark-env.sh配置
cp spark-env.sh.template spark-env.sh
vim spark-env.sh 配置以下信息
# JDK installation path
export JAVA_HOME=/usr/local/java_1.8.0_121
# Scala installation path
export SCALA_HOME=/opt/bigdata/scala/default
# Hadoop installation path
export HADOOP_HOME=/opt/bigdata/hadoop/default
# Hadoop configuration directory (lets Spark find core-site.xml / yarn-site.xml)
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
# Extra environment passed to the YARN application
# NOTE(review): SPARK_YARN_USER_ENV is deprecated in Spark 2.x — confirm it is still needed
export SPARK_YARN_USER_ENV=${HADOOP_CONF_DIR}
# Spark installation path
export SPARK_HOME=/opt/bigdata/spark/default
# Hive installation path and configuration directory (used by Spark SQL's Hive support)
export HIVE_HOME=/opt/bigdata/hive/default
export HIVE_CONF_DIR=${HIVE_HOME}/conf
# Prepend all tool bin directories to PATH
export PATH=${JAVA_HOME}/bin:${SCALA_HOME}/bin:${HADOOP_HOME}/bin:${SPARK_HOME}/bin:${HIVE_HOME}/bin:$PATH
- spark-defaults.conf配置
cp spark-defaults.conf.template spark-defaults.conf
vim spark-defaults.conf 配置如下信息
# spark job log收集,收集到hdfs上
# Collect Spark job event logs to HDFS (readable by the history server)
spark.eventLog.enabled true
spark.eventLog.dir hdfs://ecs-6531-0002.novalocal:9000/tmp/spark/eventLogs
spark.eventLog.compress true
# Default serializer — Kryo, per the Spark tuning guide, is more compact than Java serialization
spark.serializer org.apache.spark.serializer.KryoSerializer
# Default cluster manager: YARN
spark.master yarn
# Default number of driver cores
spark.driver.cores 1
# Default driver memory
spark.driver.memory 800m
# Default number of cores per executor
spark.executor.cores 1
# Default memory per executor
spark.executor.memory 1000m
# Default number of executor instances
spark.executor.instances 1
# Hive warehouse location on HDFS
spark.sql.warehouse.dir hdfs://ecs-6531-0002.novalocal:9000/user/root/warehouse
- 拷貝hive-site.xml到spark conf下,因爲要連接hive
cp $HIVE_HOME/conf/hive-site.xml $SPARK_HOME/conf/hive-site.xml
環境配置
- vim /etc/profile
# Spark configuration (appended to /etc/profile)
# Extra environment passed to the YARN application
export SPARK_YARN_USER_ENV=${HADOOP_CONF_DIR}
export SPARK_HOME=/opt/bigdata/spark/default
# NOTE(review): assumes SCALA_HOME and HADOOP_CONF_DIR are already exported earlier in /etc/profile
export PATH=${SCALA_HOME}/bin:${SPARK_HOME}/bin:$PATH
拷貝包
- 拷貝spark shuffle on yarn的包到yarn的目錄下
cp /opt/bigdata/spark/spark-2.4.3-bin-hadoop2.7/yarn/spark-2.4.3-yarn-shuffle.jar /opt/bigdata/hadoop/hadoop-3.2.0/share/hadoop/yarn/
yarn配置
- 配置yarn-site.xml文件
需要把spark_shuffle加上
<!-- Auxiliary services run inside each NodeManager: MapReduce's shuffle
     plus Spark's external shuffle service -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle,spark_shuffle</value>
</property>
<!-- Required companion setting: declares the class implementing the
     spark_shuffle service. Without it the NodeManager cannot load the
     aux service and fails to start (see Spark "Running on YARN" docs). -->
<property>
<name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
<value>org.apache.spark.network.yarn.YarnShuffleService</value>
</property>
- 重啓yarn
spark啓動測試
- 直接輸入spark-sql啓動
- 測試查詢hive
- 使用spark提交任務 直接spark-submit jar包即可