012 hadoop集羣HA搭建

1.創建普通用戶

# Create the 'hadoop' user (password: 123456) on every node
for host in node1 node2 node3; do
  ssh "$host" 'useradd hadoop;echo "123456" | passwd --stdin hadoop'
done

2.將普通用戶加入至sudoers 文件中(可以免密使用sudo指令)。

   vim  /etc/sudoers  追加以下內容

hadoop    ALL=(ALL)       NOPASSWD: ALL

  將/etc/sudoers拷貝至其他節點

# Push the updated sudoers file to the remaining nodes
# NOTE(review): sudoers normally must be mode 0440 — confirm scp preserves that on the targets
for host in node2 node3; do
  scp /etc/sudoers "$host":/etc/sudoers
done

3.修改/usr/modules的所有者爲hadoop

# Hand ownership of /usr/modules over to the hadoop user on every node
for host in node1 node2 node3; do
  ssh "$host" 'chown hadoop:hadoop /usr/modules/'
done

4.所有節點切換至普通用戶hadoop

      su - hadoop

5.上傳安裝包至node1節點/usr/modules/software

6.免密登錄(node1免密登錄node1,node2,node3    node3免密登錄node1,node2,node3)

      免密登錄

      安裝psmisc(解決主備無法正常切換)

      sudo yum install psmisc -y

7.解壓

     tar -zxf hadoop-2.9.2.tar.gz -C /usr/modules/

8.配置hadoop環境變量

   su - root

  vim  /etc/profile 

# Java and Hadoop installation roots; Hadoop bin/sbin are prepended so its tools win on PATH
export JAVA_HOME=/usr/modules/jdk1.8.0_191
export HADOOP_HOME=/usr/modules/hadoop-2.9.2
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH

將/etc/profile拷貝至另外2臺節點,所有節點重新加載環境變量

# Distribute /etc/profile to the other two nodes
for host in node2 node3; do
  scp /etc/profile "$host":/etc/profile
done

# Reload the environment variables in the current shell
source /etc/profile

9.修改配置文件

   vim  /usr/modules/hadoop-2.9.2/etc/hadoop/hadoop-env.sh

# Edit line 25 of hadoop-env.sh as follows (hard-code JAVA_HOME so remote daemons find it)
export JAVA_HOME=/usr/modules/jdk1.8.0_191

   vim  /usr/modules/hadoop-2.9.2/etc/hadoop/core-site.xml


<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
   <!-- Logical name of the HA nameservice; must match dfs.nameservices in hdfs-site.xml -->
   <property>
        <name>fs.defaultFS</name>
        <value>hdfs://mycluster</value>
    </property>

    <!-- Base directory for Hadoop's local working data -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/usr/modules/hadoop-2.9.2/data</value>
    </property>
    <!-- ZooKeeper ensemble used by the ZKFC daemons for automatic failover -->
    <property>
        <name>ha.zookeeper.quorum</name>
        <value>node1:2181,node2:2181,node3:2181</value>
    </property>
</configuration>

    vim  /usr/modules/hadoop-2.9.2/etc/hadoop/hdfs-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- Number of block replicas -->
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <!-- Logical nameservice ID; must match fs.defaultFS in core-site.xml -->
    <property>
        <name>dfs.nameservices</name>
        <value>mycluster</value>
    </property>
    <!-- The two NameNodes forming the HA pair -->
    <property>
        <name>dfs.ha.namenodes.mycluster</name>
        <value>nn1,nn2</value>
    </property>
    <property>
        <name>dfs.namenode.rpc-address.mycluster.nn1</name>
        <value>node1:8020</value>
    </property>
    <property>
        <name>dfs.namenode.rpc-address.mycluster.nn2</name>
        <value>node3:8020</value>
    </property>
    <property>
        <name>dfs.namenode.http-address.mycluster.nn1</name>
        <value>node1:50070</value>
    </property>
    <property>
        <name>dfs.namenode.http-address.mycluster.nn2</name>
        <value>node3:50070</value>
    </property>
    <!-- JournalNode quorum holding the shared edit log.
         FIX: node3 was missing; a 2-node journal requires BOTH nodes up
         (majority of 2 is 2), i.e. no fault tolerance. All three nodes
         run Hadoop daemons here, so node3:8485 is included. -->
    <property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://node1:8485;node2:8485;node3:8485/mycluster</value>
    </property>
    <!-- Local directory where each JournalNode stores its edits -->
    <property>
        <name>dfs.journalnode.edits.dir</name>
        <value>/usr/modules/hadoop-2.9.2/jn/data</value>
    </property>
    <!-- Client-side class that discovers which NameNode is currently active -->
    <property>
        <name>dfs.client.failover.proxy.provider.mycluster</name>
     <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <!-- Fence the old active NameNode over SSH during failover (needs psmisc/fuser, installed in step 6) -->
    <property>
      <name>dfs.ha.fencing.methods</name>
      <value>sshfence</value>
    </property>
    <property>
      <name>dfs.ha.fencing.ssh.private-key-files</name>
      <value>/home/hadoop/.ssh/id_rsa</value>
    </property>
    <!-- Let the ZKFC daemons trigger automatic failover -->
    <property>
        <name>dfs.ha.automatic-failover.enabled</name>
        <value>true</value>
    </property>
</configuration>

  vim  /usr/modules/hadoop-2.9.2/etc/hadoop/slaves

node1
node2
node3

vim   /usr/modules/hadoop-2.9.2/etc/hadoop/yarn-site.xml

<?xml version="1.0"?>
<!-- FIX: the original nested a second <configuration> element inside the
     root, which Hadoop's configuration loader rejects. Exactly one root. -->
<configuration>
    <!-- Enable the shuffle service NodeManagers provide to MapReduce -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <!-- Environment variables containers inherit from the NodeManager -->
    <property>
        <name>yarn.nodemanager.env-whitelist</name>
        <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
    </property>
    <!-- ResourceManager runs on node2 -->
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>node2:8089</value>
    </property>
</configuration>
~                   

mv   /usr/modules/hadoop-2.9.2/etc/hadoop/mapred-site.xml.template /usr/modules/hadoop-2.9.2/etc/hadoop/mapred-site.xml

vim  /usr/modules/hadoop-2.9.2/etc/hadoop/mapred-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- FIX: the original opened <configuration> twice but closed it only once,
     leaving the file malformed XML. Exactly one root element remains. -->
<configuration>
    <!-- Run MapReduce jobs on YARN -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.application.classpath</name>
        <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*:$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
    </property>
</configuration>

10.集羣拷貝

# Copy the configured Hadoop tree to the other nodes (per-file scp chatter discarded)
for host in node2 node3; do
  scp -r /usr/modules/hadoop-2.9.2 "$host":/usr/modules
done > /dev/null

11.啓動集羣

1、啓動zookeeper集羣
2、分別啓動journalnode
 hadoop-daemon.sh start journalnode
3、其中一個namenode上格式化集羣
 hdfs namenode -format
4、啓動
 hadoop-daemon.sh start namenode
5、另外一臺namenode同步數據
 hdfs namenode -bootstrapStandby
6、格式化zookeeper
 hdfs zkfc -formatZK
7、啓動hadoop集羣
 start-dfs.sh

12.單獨啓動 namenode、datanode

     hadoop-daemon.sh start namenode
     hadoop-daemon.sh start datanode

     單獨啓動進程

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章