Hadoop 2.6.5 fully distributed setup (HA)

Cluster deployment (changes made on top of the completed fully distributed setup)

Set up the ZooKeeper cluster

   Extract the archive

[root@node02 ~]# tar -zxvf zookeeper-3.4.6.tar.gz 
[root@node02 ~]# mv zookeeper-3.4.6 /opt/hadoop/

 Configure the environment variables (node02, node03 and node04 all need them)

export JAVA_HOME=/usr/java/jdk1.7.0_67
export HADOOP_HOME=/opt/hadoop/hadoop-2.6.5
export ZOOKEEPER_HOME=/opt/hadoop/zookeeper-3.4.6
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$ZOOKEEPER_HOME/bin
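
These exports typically live in /etc/profile (or the shell profile in use) on each node; reload the file afterwards so the zkServer.sh and hadoop commands resolve. A minimal sketch, assuming /etc/profile was edited:

[root@node02 ~]# source /etc/profile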

  

Create zoo.cfg from the sample and edit it

[root@node02 conf]# pwd
/opt/hadoop/zookeeper-3.4.6/conf
[root@node02 conf]# cp zoo_sample.cfg zoo.cfg
[root@node02 conf]# vi zoo.cfg 

# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/var/hadoop/zk
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1

server.1=192.168.106.22:2888:3888
server.2=192.168.106.23:2888:3888
server.3=192.168.106.24:2888:3888

 Create the data directory and write the server id

[root@node02 conf]# mkdir -p /var/hadoop/zk
[root@node02 conf]# echo 1 > /var/hadoop/zk/myid

 Distribute the ZooKeeper package to node03 and node04

[root@node02 hadoop]# scp -r ./zookeeper-3.4.6/ node03:/opt/hadoop/
[root@node02 hadoop]# scp -r ./zookeeper-3.4.6/ node04:/opt/hadoop/

 Create the directory and write the id on node03 and node04

[root@node03 hadoop]# mkdir -p /var/hadoop/zk
[root@node03 hadoop]# echo 2 > /var/hadoop/zk/myid

[root@node04 ~]# mkdir -p /var/hadoop/zk
[root@node04 ~]# echo 3 > /var/hadoop/zk/myid

 Start the ZooKeeper cluster

[root@node02 hadoop]# zkServer.sh start
[root@node03 hadoop]# zkServer.sh start
[root@node04 hadoop]# zkServer.sh start
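
Before continuing, it is worth confirming the ensemble formed a quorum; zkServer.sh status should report Mode: leader on one node and Mode: follower on the other two. Verification sketch:

[root@node02 hadoop]# zkServer.sh status
[root@node03 hadoop]# zkServer.sh status
[root@node04 hadoop]# zkServer.sh status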

 Configure HDFS

 [root@node01 hadoop]# vi hdfs-site.xml 

    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>



<property>
  <name>dfs.nameservices</name>
  <value>mycluster</value>
</property>

<property>
  <name>dfs.ha.namenodes.mycluster</name>
  <value>nn1,nn2</value>
</property>

<property>
  <name>dfs.namenode.rpc-address.mycluster.nn1</name>
  <value>node01:8020</value>
</property>

<property>
  <name>dfs.namenode.rpc-address.mycluster.nn2</name>
  <value>node02:8020</value>
</property>

<property>
  <name>dfs.namenode.http-address.mycluster.nn1</name>
  <value>node01:50070</value>
</property>

<property>
  <name>dfs.namenode.http-address.mycluster.nn2</name>
  <value>node02:50070</value>
</property>

<property>
  <name>dfs.namenode.shared.edits.dir</name>
  <value>qjournal://node01:8485;node02:8485;node03:8485/mycluster</value>
</property>

<property>
  <name>dfs.journalnode.edits.dir</name>
  <value>/var/hadoop/ha/jn</value>
</property>

<property>
  <name>dfs.client.failover.proxy.provider.mycluster</name>
  <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>

<property>
  <name>dfs.ha.fencing.methods</name>
  <value>sshfence</value>
</property>

<property>
  <name>dfs.ha.fencing.ssh.private-key-files</name>
  <value>/root/.ssh/id_dsa</value>
</property>

<property>
   <name>dfs.ha.automatic-failover.enabled</name>
   <value>true</value>
 </property>

  [root@node01 hadoop]# vi core-site.xml

    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://mycluster</value>
    </property>



    <property>
        <name>hadoop.tmp.dir</name>
        <value>/var/hadoop/ha</value>
    </property>


<property>
   <name>ha.zookeeper.quorum</name>
   <value>node02:2181,node03:2181,node04:2181</value>
 </property>

 Distribute the configuration files to the other nodes
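
A plausible sketch of this step, assuming hdfs-site.xml and core-site.xml were edited in $HADOOP_HOME/etc/hadoop on node01 and are copied to the same path on the other nodes:

[root@node01 hadoop]# scp hdfs-site.xml core-site.xml node02:`pwd`
[root@node01 hadoop]# scp hdfs-site.xml core-site.xml node03:`pwd`
[root@node01 hadoop]# scp hdfs-site.xml core-site.xml node04:`pwd`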

 

SSH keys for ZKFC (node01 and node02 must reach each other without a password)

[root@node02 ~]# ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
[root@node02 ~]# cd .ssh
[root@node02 .ssh]# ll
total 20
-rw-r--r-- 1 root root  601 Jan  8 22:25 authorized_keys
-rw------- 1 root root  668 Jan  9 01:02 id_dsa
-rw-r--r-- 1 root root  601 Jan  9 01:02 id_dsa.pub
-rw-r--r-- 1 root root 1197 Jan  9 00:33 known_hosts
-rw-r--r-- 1 root root  601 Jan  8 22:25 node01.pub
[root@node02 .ssh]# cat id_dsa.pub >> authorized_keys 
[root@node02 .ssh]# ssh node02
[root@node02 ~]# exit
logout
Connection to node02 closed.
[root@node02 .ssh]# scp id_dsa.pub node01:/root/.ssh/node02.pub
root@node01's password: 
id_dsa.pub                                                                                                100%  601     0.6KB/s   00:00    


[root@node01 .ssh]# ll
-rw-r--r-- 1 root root 1202 Jan  9 01:04 authorized_keys
-rw------- 1 root root  668 Jan  8 19:23 id_dsa
-rw-r--r-- 1 root root  601 Jan  8 19:23 id_dsa.pub
-rw-r--r-- 1 root root 2003 Jan  8 22:30 known_hosts
-rw-r--r-- 1 root root  601 Jan  9 01:04 node02.pub

[root@node01 .ssh]# cat node02.pub >> authorized_keys



[root@node02 .ssh]# ssh node01
Last login: Tue Jan  8 22:54:27 2019 from 192.168.106.1
[root@node01 ~]# exit

Start the JournalNodes

[root@node01 ~]# hadoop-daemon.sh start journalnode
[root@node02 ~]# hadoop-daemon.sh start journalnode
[root@node03 ~]# hadoop-daemon.sh start journalnode

Format the NameNode
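
In a standard HA setup the NameNode on node01 is formatted once, after the JournalNodes are running. A minimal sketch:

[root@node01 ~]# hdfs namenode -format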

 

Start the NameNode

[root@node01 ~]# hadoop-daemon.sh start namenode

 

[root@node02 ~]# hdfs namenode -bootstrapStandby

Format ZK
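
This initialises the HA state znode in ZooKeeper and is run once from one of the NameNode hosts. A minimal sketch:

[root@node01 ~]# hdfs zkfc -formatZK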

Start the cluster
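
A minimal sketch, assuming the cluster is started from node01; with dfs.ha.automatic-failover.enabled set to true, start-dfs.sh also brings up a DFSZKFailoverController (zkfc) next to each NameNode:

[root@node01 ~]# start-dfs.sh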

Inspect the ZooKeeper tree (the lock-related znodes)
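
The HA election state lives under /hadoop-ha/<nameservice> in ZooKeeper; a quick look with the CLI (sketch, using the mycluster nameservice configured above):

[root@node02 ~]# zkCli.sh
[zk: localhost:2181(CONNECTED) 0] ls /hadoop-ha/mycluster
[zk: localhost:2181(CONNECTED) 1] get /hadoop-ha/mycluster/ActiveStandbyElectorLock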

 Access through the web UI: http://node01:50070 and http://node02:50070 (one NameNode shows active, the other standby)

Simulate HA failover (kill the NameNode and the ZKFC in turn to test it)

First, kill the active NameNode
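
A sketch of the kill, assuming node01 is currently active; jps gives the NameNode pid, and node02 should transition to active shortly afterwards:

[root@node01 ~]# jps
[root@node01 ~]# kill -9 <NameNode pid>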

 

Recover and restart the killed NameNode
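
The restarted NameNode comes back as standby. Sketch:

[root@node01 ~]# hadoop-daemon.sh start namenode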

 

Kill the ZKFC
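
To simulate this, kill the DFSZKFailoverController on whichever node is currently active (node02 after the previous step); the surviving ZKFC grabs the lock and the other NameNode becomes active. Sketch:

[root@node02 ~]# jps
[root@node02 ~]# kill -9 <DFSZKFailoverController pid>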

Restart the ZKFC
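
Sketch, using the same daemon script:

[root@node02 ~]# hadoop-daemon.sh start zkfc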

How ZKFC works (it monitors its NameNode and holds a lock znode in ZooKeeper)

 

Whichever process is killed, its corresponding (ephemeral) znode is deleted; that deletion triggers a watch event, and the standby side performs the (state) switchover.

 
