CentOS 6.5離線安裝cloudera manager hadoop spark集羣

1、主機規劃

主機名 cdh1 10.10.5.201 8G 2CPU
主機名 cdh2 10.10.5.202 8G 2CPU
主機名 cdh3 10.10.5.203 8G 2CPU
主機名 cdh4 10.10.5.204 8G 2CPU

2、防火牆關閉

 # service iptables stop
 # chkconfig iptables off

3、yum源更新

更新國內的yum源,速度更快。

# cd /etc
# cp -r yum.repos.d/ yum.repos.d.bak
# cd yum.repos.d
# rm -fr *
# vi CentOS6-Base-163.repo 

以下爲CentOS6-Base-163.repo的內容。

# CentOS-Base.repo
#
# The mirror system uses the connecting IP address of the client and the
# update status of each mirror to pick mirrors that are updated to and
# geographically close to the client.  You should use this for CentOS updates
# unless you are manually picking other mirrors.
#
# If the mirrorlist= does not work for you, as a fall back you can try the 
# remarked out baseurl= line instead.
#
#

[base]
name=CentOS-$releasever - Base - 163.com
baseurl=http://mirrors.163.com/centos/$releasever/os/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os
gpgcheck=1
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6

#released updates 
[updates]
name=CentOS-$releasever - Updates - 163.com
baseurl=http://mirrors.163.com/centos/$releasever/updates/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates
gpgcheck=1
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6

#additional packages that may be useful
[extras]
name=CentOS-$releasever - Extras - 163.com
baseurl=http://mirrors.163.com/centos/$releasever/extras/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras
gpgcheck=1
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6

#additional packages that extend functionality of existing packages
[centosplus]
name=CentOS-$releasever - Plus - 163.com
baseurl=http://mirrors.163.com/centos/$releasever/centosplus/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus
gpgcheck=1
enabled=0
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6

#contrib - packages by Centos Users
[contrib]
name=CentOS-$releasever - Contrib - 163.com
baseurl=http://mirrors.163.com/centos/$releasever/contrib/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=contrib
gpgcheck=1
enabled=0
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6

更新完yum源後,執行下邊命令更新yum配置,使操作立即生效

# yum makecache

4、關閉selinux

# cd /etc/selinux/
# vim config

將SELINUX 修改爲disabled

 # This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of these two values:
#     targeted - Targeted processes are protected,
#     mls - Multi Level Security protection.
SELINUXTYPE=targeted

5、安裝openssh等

# yum -y install openssh-server openssh-clients wget vim
# yum install mlocate -y
updatedb

6、免密碼登錄

# vi /etc/hosts

編輯hosts表

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.10.5.201 cdh1
10.10.5.202 cdh2
10.10.5.203 cdh3
10.10.5.204 cdh4

每臺機器上執行:

# ssh-keygen -t rsa

一路回車

# cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
# scp cdh2:~/.ssh/id_rsa.pub id_rsa.pub.cdh2
# scp cdh3:~/.ssh/id_rsa.pub id_rsa.pub.cdh3
# scp cdh4:~/.ssh/id_rsa.pub id_rsa.pub.cdh4
# cat id_rsa.pub.cdh2 >> authorized_keys
# cat id_rsa.pub.cdh3 >> authorized_keys
# cat id_rsa.pub.cdh4 >> authorized_keys
# chmod 600 authorized_keys
# scp authorized_keys cdh2:~/.ssh/authorized_keys
# scp authorized_keys cdh3:~/.ssh/authorized_keys
# scp authorized_keys cdh4:~/.ssh/authorized_keys

7、oracle官網下載 jdk8, 並安裝

jdk-8u60-linux-x64.rpm

# rpm -qa | grep java

如果有以下則刪除。

java-1.7.0-openjdk-1.7.0.45-2.4.3.3.el6.x86_64
java-1.6.0-openjdk-1.6.0.0-1.66.1.13.0.el6.x86_64
tzdata-java-2013g-1.el6.noarch

如果有則刪除

# rpm -e --nodeps java-1.7.0-openjdk-1.7.0.45-2.4.3.3.el6.x86_64
# rpm -e --nodeps java-1.6.0-openjdk-1.6.0.0-1.66.1.13.0.el6.x86_64
# rpm -ivh jdk-8u60-linux-x64.rpm
# java -version
java version "1.8.0_60"
Java(TM) SE Runtime Environment (build 1.8.0_60-b27)
Java HotSpot(TM) 64-Bit Server VM (build 25.60-b23, mixed mode)

8、環境變量

編輯 /etc/profile文件

#vim /etc/profile
export JAVA_HOME=/usr/java/jdk1.8.0_60
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
# source /etc/profile

9、配置ntp服務. ntp服務主節點爲cdh4

每個節點

# yum install ntp -y

# service ntpd start
# chkconfig ntpd on

ntp服務主節點,此處爲cdh4

# ntpstat
synchronised to NTP server (202.112.29.82) at stratum 3 
   time correct to within 3994 ms
   polling server every 64 s

其他節點

# vim /etc/ntp.conf
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
server cdh4 prefer
server 0.centos.pool.ntp.org iburst
server 1.centos.pool.ntp.org iburst
# ntpdate -u cdh4

10、安裝mysql

安裝到CDH1主機,
參考文章http://blog.csdn.net/feifeilyj/article/details/52141052
注意修改字符集爲 utf-8

11、創建hive用戶及其他用戶,可根據服務建庫和用戶

create database hivedb;
create database huedb;
create database scm;
create database ooziedb;
create database actmonitordb;
FLUSH PRIVILEGES;
CREATE USER hive IDENTIFIED BY 'hive';
CREATE USER hive@localhost IDENTIFIED BY 'hive';
CREATE USER hive@'cdh1' IDENTIFIED BY 'hive';
grant all privileges on *.* to hive with grant option;
grant all privileges on *.* to hive@localhost with grant option;
grant all privileges on *.* to hive@'cdh1' with grant option;
FLUSH PRIVILEGES;

FLUSH PRIVILEGES;
CREATE USER scm IDENTIFIED BY 'scm';
CREATE USER scm@localhost IDENTIFIED BY 'scm';
CREATE USER scm@'cdh1' IDENTIFIED BY 'scm';
grant all privileges on *.* to scm with grant option;
grant all privileges on *.* to scm@localhost with grant option;
grant all privileges on *.* to scm@'cdh1' with grant option;
FLUSH PRIVILEGES;


FLUSH PRIVILEGES;
CREATE USER hue IDENTIFIED BY 'hue';
CREATE USER hue@localhost IDENTIFIED BY 'hue';
CREATE USER hue@'cdh1' IDENTIFIED BY 'hue';
grant all privileges on *.* to hue with grant option;
grant all privileges on *.* to hue@localhost with grant option;
grant all privileges on *.* to hue@'cdh1' with grant option;
FLUSH PRIVILEGES;

FLUSH PRIVILEGES;
CREATE USER oozie IDENTIFIED BY 'oozie';
CREATE USER oozie@localhost IDENTIFIED BY 'oozie';
CREATE USER oozie@'cdh1' IDENTIFIED BY 'oozie';
grant all privileges on *.* to oozie with grant option;
grant all privileges on *.* to oozie@localhost with grant option;
grant all privileges on *.* to oozie@'cdh1' with grant option;
FLUSH PRIVILEGES;


FLUSH PRIVILEGES;
CREATE USER actmonitor IDENTIFIED BY 'actmonitor';
CREATE USER actmonitor@localhost IDENTIFIED BY 'actmonitor';
CREATE USER actmonitor@'cdh1' IDENTIFIED BY 'actmonitor';
grant all privileges on *.* to actmonitor with grant option;
grant all privileges on *.* to actmonitor@localhost with grant option;
grant all privileges on *.* to actmonitor@'cdh1' with grant option;
FLUSH PRIVILEGES;

12、特殊配置(CM 要求):

全部機器均執行。

# echo 0 > /proc/sys/vm/swappiness
# echo never > /sys/kernel/mm/redhat_transparent_hugepage/defrag 

13、解除 Linux 系統的最大進程數和最大文件打開數限制:

vi /etc/security/limits.conf

添加如下的行

* soft nproc 11000
* hard nproc 11000
* soft nofile 65535
* hard nofile 65535

14、創建用戶cloudera-scm

# useradd --system --home=/opt/cm-5.8.0/run/cloudera-scm-server --no-create-home --shell=/bin/false --comment "Cloudera SCM User" cloudera-scm

15、準備Parcels文件

準備 CDH-5.8.0-1.cdh5.8.0.p0.42-el6.parcel CDH-5.8.0-1.cdh5.8.0.p0.42-el6.parcel.sha1 manifest.json 三個文件,並上傳

# mkdir -p /opt/cloudera/parcel-repo/

從本地上傳

$ scp CDH-5.8.0-1.cdh5.8.0.p0.42-el6.parcel root@rr1:/opt/cloudera/parcel-repo/
$ scp CDH-5.8.0-1.cdh5.8.0.p0.42-el6.parcel.sha1 root@rr1:/opt/cloudera/parcel-repo/
$ scp manifest.json root@rr1:/opt/cloudera/parcel-repo/
# cd /opt/cloudera/parcel-repo/
# ls
CDH-5.8.0-1.cdh5.8.0.p0.42-el6.parcel  CDH-5.8.0-1.cdh5.8.0.p0.42-el6.parcel.sha1  manifest.json
# mv CDH-5.8.0-1.cdh5.8.0.p0.42-el6.parcel.sha1 CDH-5.8.0-1.cdh5.8.0.p0.42-el6.parcel.sha

16、 主節點上傳cloudera manager

cloudera manager的目錄默認位置在/opt下,解壓:tar -xzvf cloudera-manager-el6-cm5.8.0_x86_64.tar.gz將解壓後的cm-5.8.0和cloudera目錄放到/opt目錄下。

# tar -xvzf cloudera-manager-el6-cm5.8.0_x86_64.tar.gz

17、安裝mysql驅動

#tar -xvzf mysql-connector-java-5.1.39.tar.gz
#cd mysql-connector-java-5.1.39
#mv mysql-connector-java-5.1.39-bin.jar /opt/cm-5.8.0/share/cmf/lib/

18、執行mysql準備腳本

# /opt/cm-5.8.0/share/cmf/schema/scm_prepare_database.sh mysql cm -hlocalhost -uroot -pyourrootpassword --scm-host cdh1 scm scm scm

運行結果:

JAVA_HOME=/usr/java/jdk1.8.0_60
Verifying that we can write to /opt/cm-5.8.0/etc/cloudera-scm-server
Creating SCM configuration file in /opt/cm-5.8.0/etc/cloudera-scm-server
Executing:  /usr/java/jdk1.8.0_60/bin/java -cp /usr/share/java/mysql-connector-java.jar:/usr/share/java/oracle-connector-java.jar:/opt/cm-5.8.0/share/cmf/schema/../lib/* com.cloudera.enterprise.dbutil.DbCommandExecutor /opt/cm-5.8.0/etc/cloudera-scm-server/db.properties com.cloudera.cmf.db.
2017-01-17 10:11:49,648 [main] INFO  com.cloudera.enterprise.dbutil.DbCommandExecutor  - Successfully connected to database.
All done, your SCM database is configured correctly!

查看配置文件:

# cat /opt/cm-5.8.0/etc/cloudera-scm-server/db.properties

以下爲配置文件信息。

# Auto-generated by scm_prepare_database.sh on 2017年 01月 17日 星期二 10:11:49 CST
#
# For information describing how to configure the Cloudera Manager Server
# to connect to databases, see the "Cloudera Manager Installation Guide."
#
com.cloudera.cmf.db.type=mysql
com.cloudera.cmf.db.host=localhost
com.cloudera.cmf.db.name=cm
com.cloudera.cmf.db.user=scm
com.cloudera.cmf.db.password=scm

19、安裝文件分發

# scp -r /opt/cm-5.8.0/ root@cdh2:/opt/
# scp -r /opt/cm-5.8.0/ root@cdh3:/opt/
# scp -r /opt/cm-5.8.0/ root@cdh4:/opt/

20、啓動 cloudera-scm-server

# /opt/cm-5.8.0/etc/init.d/cloudera-scm-server start

查看啓動日誌

# tail -f /opt/cm-5.8.0/log/cloudera-scm-server/cloudera-scm-server.log
2016-12-29 09:33:42,461 INFO WebServerImpl:org.mortbay.log: Started SelectChannelConnector@0.0.0.0:7180
2016-12-29 09:33:42,461 INFO WebServerImpl:com.cloudera.server.cmf.WebServerImpl: Started Jetty server.
2016-12-29 09:41:35,972 INFO ScmActive-0:com.cloudera.server.cmf.components.ScmActive: ScmActive completed successfully.
2016-12-29 09:33:42,042 ERROR SearchRepositoryManager-0:com.cloudera.server.web.cmf.search.components.SearchRepositoryManager: The server storage directory [/var/lib/clo
udera-scm-server] doesn't exist.
2016-12-29 09:33:42,044 ERROR SearchRepositoryManager-0:com.cloudera.server.web.cmf.search.components.SearchRepositoryManager: No read permission to the server storage d
irectory [/var/lib/cloudera-scm-server]
2016-12-29 09:33:42,044 ERROR SearchRepositoryManager-0:com.cloudera.server.web.cmf.search.components.SearchRepositoryManager: No write permission to the server storage 
directory [/var/lib/cloudera-scm-server]

提示目錄 /var/lib/cloudera-scm-server 不存在,創建目錄

# mkdir /var/lib/cloudera-scm-server

21、所有主機開啓cloudera-scm-agent

# mkdir /opt/cm-5.8.0/run/cloudera-scm-agent
# /opt/cm-5.8.0/etc/init.d/cloudera-scm-agent start

22、進入主界面

server主機的7180端口,用戶名密碼都是admin

這裏寫圖片描述

同意許可

這裏寫圖片描述

選擇Express免費版本。

這裏寫圖片描述

23、開始配置!!!

這裏寫圖片描述

搜索集羣的主機,可以輸入主機名或者IP地址進行搜索。

這裏寫圖片描述

接下來,出現以下包名,說明本地Parcel包配置無誤,直接點繼續就可以了。
如果此處發現不到parcel包,就重啓所有節點的agent服務,和master的server服務。

這裏寫圖片描述

選擇是否安裝oracle jdk

這裏寫圖片描述

選擇是否啓用單用戶模式。否的話,不同的服務有不同的用戶。

這裏寫圖片描述

輸入root密碼

這裏寫圖片描述

集羣安裝

這裏寫圖片描述

此步驟時間比較長,後臺可能出現異常。

# more /opt/cm-5.8.0/log/cloudera-scm-agent/cloudera-scm-agent.log 


[17/Jan/2017 11:19:27 +0000] 2333 MainThread agent        ERROR    Heartbeating to localhost:7182 failed.
Traceback (most recent call last):
  File "/opt/cm-5.8.0/lib64/cmf/agent/build/env/lib/python2.6/site-packages/cmf-5.8.0-py2.6.egg/cmf/agent.py", line 1206, in _send_heartbeat
    self.master_port)
  File "/opt/cm-5.8.0/lib64/cmf/agent/build/env/lib/python2.6/site-packages/avro-1.6.3-py2.6.egg/avro/ipc.py", line 469, in __init__
    self.conn.connect()
  File "/usr/lib64/python2.6/httplib.py", line 720, in connect
    self.timeout)
  File "/usr/lib64/python2.6/socket.py", line 567, in create_connection
    raise error, msg
error: [Errno 111] Connection refused
[17/Jan/2017 11:20:27 +0000] 2333 MainThread agent        ERROR    Heartbeating to localhost:7182 failed.

修改 /opt/cm-5.8.0/etc/cloudera-scm-agent/config.ini 配置文件中的server_host 設置爲cdh1

[General]
# Hostname of the CM server.
server_host=cdh1

一直沒有反應可以選擇中止回退。

安裝parcel
這裏寫圖片描述

檢查主機的正確性。

這裏寫圖片描述

選擇安裝的服務,根據需要選擇安裝。

這裏寫圖片描述

集羣設置。服務均勻部署在各個節點上。

這裏寫圖片描述

審覈更改。
如果部署了oozie,需要安裝postgresql數據庫

# useradd postgre
# yum install postgresql-server
# service postgresql initdb 
# chkconfig postgresql on
# service postgresql start 
# service postgresql status
# sudo -u postgres psql

執行postgresql命令:

create database "sqoopdb";
create user sqoop with password 'sqoop';
GRANT ALL PRIVILEGES ON DATABASE sqoopdb to sqoop;

測試連通性,並查看所有的數據庫

# psql -h cdh1  -U sqoop -d sqoopdb
sqoopdb=> \l

允許遠程連接:

# vi /var/lib/pgsql/data/pg_hba.conf
# TYPE  DATABASE    USER        CIDR-ADDRESS          METHOD

# "local" is for Unix domain socket connections only
local   all         all                               trust
# IPv4 local connections:
host    all         all         127.0.0.1/32          trust
host    all         all         10.10.5.0/24          md5

listen_addresses 默認爲本機,修改爲*,偵聽所有地址

vim /var/lib/pgsql/data/postgresql.conf
# - Connection Settings -
listen_addresses = '*'
#listen_addresses = 'localhost'         # what IP address(es) to listen on;

重啓 postgresql

# service postgresql restart

這裏寫圖片描述

安裝完成啓動

這裏寫圖片描述

集羣設置完成。
這裏寫圖片描述

進入監控界面。
這裏寫圖片描述

其他問題彙總:
1)HDFS NFS Gateway 啓動異常,提示

Cannot connect to port 111.
No portmap or rpcbind service is running on this host. Please start portmap or rpcbind service before attempting to start the NFS Gateway role on this host.

解決方法如下,然後重啓 HDFS NFS Gateway 服務。

 # yum install rpcbind
 # service rpcbind start
 # chkconfig rpcbind on

2)啓動hue,提示

ImportError: libxslt.so.1: cannot open shared object file: No such file or directory

安裝 libxslt-python libxslt-devel

# yum install libxslt-python
# yum install libxslt-devel

3)啓動HA,點擊右上角 操作 按鈕,選中 啓用HA
完成HA後,可能需要重新對namenode進行format。

這裏寫圖片描述

4)在啓動spark時,有可能提示Log directory specified does not exist:

問題:

在啓動spark時,有可能提示Log directory specified does not exist: hdfs://cdh1:8020/user/spark/applicationHistory.

日誌:

Exception in thread "main" java.lang.reflect.InvocationTargetException
    at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:422)
    at org.apache.spark.deploy.history.HistoryServer$.main(HistoryServer.scala:252)
    at org.apache.spark.deploy.history.HistoryServer.main(HistoryServer.scala)
Caused by: java.lang.IllegalArgumentException: Log directory specified does not exist: hdfs://cdh1:8020/user/spark/applicationHistory.
    at org.apache.spark.deploy.history.FsHistoryProvider.org$apache$spark$deploy$history$FsHistoryProvider$$startPolling(FsHistoryProvider.scala:194)
    at org.apache.spark.deploy.history.FsHistoryProvider.initialize(FsHistoryProvider.scala:146)
    at org.apache.spark.deploy.history.FsHistoryProvider.<init>(FsHistoryProvider.scala:142)
    at org.apache.spark.deploy.history.FsHistoryProvider.<init>(FsHistoryProvider.scala:74)
    ... 6 more

我們手動在hdfs上添加/user/spark/applicationHistory目錄,主要目錄的owner需要是spark用戶,可以先用hdfs用戶新建此目錄,然後使用命令:hdfs dfs -chown -R spark:spark /user/spark/applicationHistory,將擁有者轉換成spark即可。

$ sudo -u hdfs hadoop fs -mkdir /user/spark
$ sudo -u hdfs hadoop fs -mkdir /user/spark/applicationHistory
$ sudo -u hdfs hadoop fs -chown -R spark:spark /user/spark
$ sudo -u hdfs hadoop fs -chmod 1777 /user/spark/applicationHistory

5)Hue 使用hive時報錯:
Hive The application won’t work without a running HiveServer2.

Could not start SASL: Error in sasl_client_start (-4) SASL(-4): no mechanism available: No worthy mechs found (code THRIFTTRANSPORT): TTransportException('Could not start SASL: Error in sasl_client_start (-4) SASL(-4): no mechanism available: No worthy mechs found',)

解決方法

# yum install cyrus-sasl-plain -y
# yum install cyrus-sasl-gssapi -y
# yum install cyrus-sasl-devel -y

6)hue 使用oozie hue安裝運行時候提示:oozie share lib not installed in default location.

# sudo -u hdfs hadoop fs -mkdir -p /user/oozie/share/lib
# sudo -u hdfs hadoop fs -chown -R oozie:oozie /user/oozie
# sudo -u oozie hadoop fs -put /opt/cloudera/parcels/CDH-5.8.0-1.cdh5.8.0.p0.42/jars/oozie-sharelib-distcp-4.1.0-cdh5.8.0.jar /user/oozie/share
# sudo -u oozie hadoop fs -put /opt/cloudera/parcels/CDH-5.8.0-1.cdh5.8.0.p0.42/jars/oozie-sharelib-hcatalog-4.1.0-cdh5.8.0.jar /user/oozie/share
# sudo -u oozie hadoop fs -put /opt/cloudera/parcels/CDH-5.8.0-1.cdh5.8.0.p0.42/jars/oozie-sharelib-hive-4.1.0-cdh5.8.0.jar /user/oozie/share
# sudo -u oozie hadoop fs -put /opt/cloudera/parcels/CDH-5.8.0-1.cdh5.8.0.p0.42/jars/oozie-sharelib-hive2-4.1.0-cdh5.8.0.jar /user/oozie/share
# sudo -u oozie hadoop fs -put /opt/cloudera/parcels/CDH-5.8.0-1.cdh5.8.0.p0.42/jars/oozie-sharelib-oozie-4.1.0-cdh5.8.0.jar /user/oozie/share
# sudo -u oozie hadoop fs -put /opt/cloudera/parcels/CDH-5.8.0-1.cdh5.8.0.p0.42/jars/oozie-sharelib-pig-4.1.0-cdh5.8.0.jar /user/oozie/share
# sudo -u oozie hadoop fs -put /opt/cloudera/parcels/CDH-5.8.0-1.cdh5.8.0.p0.42/jars/oozie-sharelib-spark-4.1.0-cdh5.8.0.jar /user/oozie/share
# sudo -u oozie hadoop fs -put /opt/cloudera/parcels/CDH-5.8.0-1.cdh5.8.0.p0.42/jars/oozie-sharelib-sqoop-4.1.0-cdh5.8.0.jar /user/oozie/share
# sudo -u oozie hadoop fs -put /opt/cloudera/parcels/CDH-5.8.0-1.cdh5.8.0.p0.42/jars/oozie-sharelib-streaming-4.1.0-cdh5.8.0.jar /user/oozie/share
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章