Continuing from the previous post, "spark-core_01: analysis of the $SPARK_HOME/bin/spark-shell script".
1. $SPARK_HOME/bin/spark-submit
################################################
# When called from spark-shell, the arguments passed in are: --class org.apache.spark.repl.Main --name "Spark shell" --master spark://luyl152:7077
# The script first checks SPARK_HOME, then calls $SPARK_HOME/bin/spark-class with org.apache.spark.deploy.SparkSubmit prepended as the first argument,
# i.e. it ends up executing: spark-class org.apache.spark.deploy.SparkSubmit --class org.apache.spark.repl.Main --name "Spark shell" --master spark://luyl152:7077
################################################
#!/usr/bin/env bash
if [ -z "${SPARK_HOME}" ]; then
  export SPARK_HOME="$(cd "`dirname "$0"`"/..; pwd)"
fi
# disable randomized hash for string in Python 3.3+
export PYTHONHASHSEED=0
# exec runs the following command in place of the current process: the new process keeps the same PID as the shell that launched it,
# and the rest of the script after the exec line never runs, because the process has been replaced. (See the small exec demo after this script.)
exec "${SPARK_HOME}"/bin/spark-class org.apache.spark.deploy.SparkSubmit "$@"
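A minimal standalone sketch (not part of any Spark script; the file name demo_exec.sh is made up) of the exec behaviour described above: the command given to exec replaces the current shell process, keeps the same PID, and nothing after the exec line ever runs.

#!/usr/bin/env bash
# demo_exec.sh -- hypothetical demo, for illustration only
echo "before exec, PID=$$"
exec bash -c 'echo "after exec, same PID=$$"'
echo "this line is never reached"   # dead code: the shell was replaced by exec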
2. $SPARK_HOME/bin/spark-class
###########################################################################
# When launched from spark-shell, the spark-submit script passes in the following arguments:
#   org.apache.spark.deploy.SparkSubmit --class org.apache.spark.repl.Main --name "Spark shell" --master spark://luyl152:7077
# For your own application, you simply run the spark-submit script directly with your own --class and related arguments.
###########################################################################
#!/usr/bin/env bash
# Again check whether the SPARK_HOME environment variable is set
if [ -z "${SPARK_HOME}" ]; then
  export SPARK_HOME="$(cd "`dirname "$0"`"/..; pwd)"
fi
# Source the script (it would also run without the leading ". ", but then the variables it sets would not be visible here):
# it configures a few environment variables and loads the ones defined in conf/spark-env.sh. (A small sourcing demo follows right after this line.)
. "${SPARK_HOME}"/bin/load-spark-env.sh
# Find the java binary: if JAVA_HOME is set, $JAVA_HOME/bin/java becomes RUNNER
# [ -n str ] is true when the string length is greater than zero
# (a simplified standalone version of this check is sketched right after this block)
if [ -n "${JAVA_HOME}" ]; then
  RUNNER="${JAVA_HOME}/bin/java"
else
  # command -v does the same job as which
  if [ `command -v java` ]; then
    RUNNER="java"
  else
    echo "JAVA_HOME is not set" >&2
    exit 1
  fi
fi
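The same RUNNER selection can be sketched in isolation (a hedged rewrite for illustration, not the actual spark-class code); command -v prints the path of the executable, much like which, and returns a non-zero status when the program is not found:

if [ -n "${JAVA_HOME}" ]; then
  RUNNER="${JAVA_HOME}/bin/java"
elif command -v java >/dev/null 2>&1; then
  RUNNER="java"
else
  echo "JAVA_HOME is not set" >&2
  exit 1
fi
echo "using runner: $RUNNER"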
# Find assembly jar: first check whether the file $SPARK_HOME/RELEASE exists; if it does, use $SPARK_HOME/lib as ASSEMBLY_DIR
SPARK_ASSEMBLY_JAR=
if [ -f "${SPARK_HOME}/RELEASE" ]; then
  ASSEMBLY_DIR="${SPARK_HOME}/lib"
else
  ASSEMBLY_DIR="${SPARK_HOME}/assembly/target/scala-$SPARK_SCALA_VERSION"
fi
# ls -1 differs from ls -l in that it prints only the file names, without type, size, date, etc.; num_jars holds the number of spark-assembly jars found
# (a standalone demo of this counting idiom follows after the next if block)
GREP_OPTIONS=
num_jars="$(ls -1 "$ASSEMBLY_DIR" | grep "^spark-assembly.*hadoop.*\.jar$" | wc -l)"
# If $num_jars is 0, print an error and exit
if [ "$num_jars" -eq "0" -a -z "$SPARK_ASSEMBLY_JAR" -a "$SPARK_PREPEND_CLASSES" != "1" ]; then
  echo "Failed to find Spark assembly in $ASSEMBLY_DIR." 1>&2
  echo "You need to build Spark before running this program." 1>&2
  exit 1
fi
if [ -d "$ASSEMBLY_DIR" ]; then
  # Appending "|| true" to the ls makes no difference to the result here; it still returns the name spark-assembly-1.6.0-hadoop2.6.0.jar
  ASSEMBLY_JARS="$(ls -1 "$ASSEMBLY_DIR" | grep "^spark-assembly.*hadoop.*\.jar$" || true)"
  # $num_jars must not be greater than 1, otherwise the script exits
  if [ "$num_jars" -gt "1" ]; then
    echo "Found multiple Spark assembly jars in $ASSEMBLY_DIR:" 1>&2
    echo "$ASSEMBLY_JARS" 1>&2
    echo "Please remove all but one jar." 1>&2
    exit 1
  fi
fi
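A self-contained sketch of the counting idiom used above (the directory /tmp/assembly-demo and the file names are made up): ls -1 prints one name per line, grep keeps only the assembly jars, and wc -l counts the matching lines.

ASSEMBLY_DIR=/tmp/assembly-demo     # hypothetical directory, just for the demo
mkdir -p "$ASSEMBLY_DIR"
touch "$ASSEMBLY_DIR/spark-assembly-1.6.0-hadoop2.6.0.jar" "$ASSEMBLY_DIR/other.jar"
num_jars="$(ls -1 "$ASSEMBLY_DIR" | grep "^spark-assembly.*hadoop.*\.jar$" | wc -l)"
echo "found $num_jars assembly jar(s)"   # prints 1: only the matching name is counted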
# Assign $SPARK_HOME/lib/spark-assembly-1.6.0-hadoop2.6.0.jar to the variable SPARK_ASSEMBLY_JAR
SPARK_ASSEMBLY_JAR="${ASSEMBLY_DIR}/${ASSEMBLY_JARS}"
# And assign the same jar path to another variable, LAUNCH_CLASSPATH
LAUNCH_CLASSPATH="$SPARK_ASSEMBLY_JAR"
# Add the launcher build dir to the classpath if requested.
# [ -n str ] is true when the string length is greater than zero
if [ -n "$SPARK_PREPEND_CLASSES" ]; then
  LAUNCH_CLASSPATH="${SPARK_HOME}/launcher/target/scala-$SPARK_SCALA_VERSION/classes:$LAUNCH_CLASSPATH"
fi
# Export $SPARK_HOME/lib/spark-assembly-1.6.0-hadoop2.6.0.jar as the global variable _SPARK_ASSEMBLY
export _SPARK_ASSEMBLY="$SPARK_ASSEMBLY_JAR"
# For tests
if [[ -n "$SPARK_TESTING" ]]; then
  unset YARN_CONF_DIR
  unset HADOOP_CONF_DIR
fi
# The launcher library will print arguments separated by a NULL character, to allow arguments with
# characters that would be otherwise interpreted by the shell. Read that in a while loop, populating
# an array that will be used to exec the final command.
#######################################################
# To debug, export the following $JAVA_OPTS environment variable:
# export JAVA_OPTS="$JAVA_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005"
# To debug org.apache.spark.launcher.Main, put $JAVA_OPTS right after $RUNNER;
# to debug SparkSubmit.scala directly, put $JAVA_OPTS after launcher.Main instead.
# spark-class first runs org.apache.spark.launcher.Main, which parses the script arguments; the arguments it prints are consumed by the
# while loop below, and the resulting command (the main method of SparkSubmit) is then executed.
# (A standalone demo of this NUL-delimited reading pattern follows right after the script.)
# The parentheses below create a bash array (parentheses can also mean running commands in a subshell).
# The launcher invocation is effectively:
# java -cp spark_home/lib/spark-assembly-1.6.0-hadoop2.6.0.jar org.apache.spark.launcher.Main org.apache.spark.deploy.SparkSubmit --class org.apache.spark.repl.Main --name "Spark shell" --master spark://luyl152:7077
#######################################################
CMD=()
while IFS= read -d '' -r ARG; do
  echo "===========>$ARG"
  CMD+=("$ARG")
done < <("$RUNNER" -cp "$LAUNCH_CLASSPATH" org.apache.spark.launcher.Main "$@")
exec "${CMD[@]}"
# The data returned by launcher.Main is stored in the CMD array; printing $ARG (the echo added in the loop above) shows its contents, which are exactly the arguments printed by the main method of launcher.Main
[root@luyl155 bin]# ./spark-shell --master spark://luyl152:7077,luyl153:7077,luyl154:7077 --verbose /tool/jarDir/maven_scala-1.0-SNAPSHOT.jar
# The ARG output is shown below. So ${CMD[@]} simply means: run java -cp with the classpath on the third output line, apply the JVM options on lines four to seven, execute org.apache.spark.deploy.SparkSubmit, and pass everything after it (--class and so on) as SparkSubmit's own arguments
===========>/usr/local/java/jdk1.8.0_91/bin/java
===========>-cp
===========>/data/spark-1.6.0-bin-hadoop2.6/conf/:/data/spark-1.6.0-bin-hadoop2.6/lib/spark-assembly-1.6.0-hadoop2.6.0.jar:
/data/spark-1.6.0-bin-hadoop2.6/lib/datanucleus-api-jdo-3.2.6.jar:/data/spark-1.6.0-bin-hadoop2.6/lib/datanucleus-rdbms-3.2.9.jar:
/data/spark-1.6.0-bin-hadoop2.6/lib/datanucleus-core-3.2.10.jar:/data/hadoop-2.6.5/etc/hadoop/
===========>-Xms1g
===========>-Xmx1g
===========>-Xdebug
===========>-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005
===========>org.apache.spark.deploy.SparkSubmit
===========>--class
===========>org.apache.spark.repl.Main
===========>--name
===========>Spark shell
===========>--master
===========>spark://luyl152:7077,luyl153:7077,luyl154:7077
===========>--verbose
===========>/tool/jarDir/maven_scala-1.0-SNAPSHOT.jar
===========>spark://luyl152:7077,luyl153:7077,luyl154:7077
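Stitching those elements back together, the exec "${CMD[@]}" at the end of spark-class is therefore equivalent to running roughly the following single command (classpath abbreviated here):

/usr/local/java/jdk1.8.0_91/bin/java -cp "/data/spark-1.6.0-bin-hadoop2.6/conf/:/data/spark-1.6.0-bin-hadoop2.6/lib/spark-assembly-1.6.0-hadoop2.6.0.jar:..." \
  -Xms1g -Xmx1g -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005 \
  org.apache.spark.deploy.SparkSubmit \
  --class org.apache.spark.repl.Main --name "Spark shell" \
  --master spark://luyl152:7077,luyl153:7077,luyl154:7077 \
  --verbose /tool/jarDir/maven_scala-1.0-SNAPSHOT.jar spark://luyl152:7077,luyl153:7077,luyl154:7077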
Next, let's analyze the source code of org.apache.spark.launcher.Main.