Hadoop版本
Hadoop集羣節點分配
Hadoop3.x端口變化
1:安裝系統
2:確定hostname
[root@bigdata1 jdk]# cat /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=master
3:設置網絡
4:設置hosts
[root@bigdata1 jdk]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.105 master
192.168.1.206 slave1
192.168.1.106 slave2
5:關閉防火牆
[root@bigdata2 ~]# service iptables stop
iptables:將鏈設置爲政策 ACCEPT:filter [確定]
iptables:清除防火牆規則: [確定]
iptables:正在卸載模塊: [確定]
[root@bigdata2 ~]# chkconfig iptables off
6:設置selinux
[root@bigdata2 ~]# vim /etc/selinux/config
7:創建hadoop用戶
useradd hadoop
passwd hadoop
8:配置ssh免祕鑰登錄(用hadoop用戶設置)
ssh-keygen -t rsa
如果沒有該命令:安裝 yum -y install openssh-clients
ssh-copy-id hadoop@master
ssh-copy-id hadoop@slave1
ssh-copy-id hadoop@slave2
9:安裝Java1.8(hadoop用戶)
vim .bash_profile
export JAVA_HOME=/opt/app/jdk/jdk1.8.0_171
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH
source .bash_profile
10:安裝hadoop(hadoop用戶)
1:創建hdfs nameNode
mkdir -p /data/hadoop/hdfs/namenode
2:創建hdfs dataNode
mkdir -p /data/hadoop/hdfs/datanode
3:創建hdfs tmp
mkdir -p /data/hadoop/tmp
4:創建yarn nodemanager
mkdir -p /data/hadoop/yarn/nodemanager
5:創建yarn log
mkdir -p /data/hadoop/yarn/logs
6:mr目錄
mkdir -p /data/hadoop/mr
11:解壓hadoop-3.2.0.tar.gz包
12:添加hadoop環境變量
export HADOOP_HOME=/opt/app/hadoop/hadoop-3.2.0
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
source /etc/profile
13:修改配置文件(core-site.xml)
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://master:9000</value>
<description>namenode節點地址與端口</description>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/data/hadoop/tmp</value>
<description>臨時文件存儲路徑</description>
</property>
</configuration>
14:修改hadoop-env.sh文件
export JAVA_HOME=/opt/app/jdk/jdk1.8.0_171
15:修改hdfs-site.xml文件
<configuration>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/data/hadoop/hdfs/namenode</value>
<final>true</final>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/data/hadoop/hdfs/datanode</value>
<final>true</final>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>master:9001</value>
</property>
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
</configuration>
16:修改mapred-site.xml文件
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>yarn.app.mapreduce.am.env</name>
<value>HADOOP_MAPRED_HOME=/opt/app/hadoop/hadoop-3.2.0</value>
</property>
<property>
<name>mapreduce.map.env</name>
<value>HADOOP_MAPRED_HOME=/opt/app/hadoop/hadoop-3.2.0</value>
</property>
<property>
<name>mapreduce.reduce.env</name>
<value>HADOOP_MAPRED_HOME=/opt/app/hadoop/hadoop-3.2.0</value>
</property>
<property>
<name>mapreduce.application.classpath</name>
<value>$HADOOP_HOME/share/hadoop/mapreduce/*,$HADOOP_HOME/share/hadoop/mapreduce/lib/*,$HADOOP_HOME/share/hadoop/common/*,$HADOOP_HOME/share/hadoop/common/lib/*,$HADOOP_HOME/share/hadoop/yarn/*,$HADOOP_HOME/share/hadoop/yarn/lib/*,$HADOOP_HOME/share/hadoop/hdfs/*,$HADOOP_HOME/share/hadoop/hdfs/lib/*</value>
</property>
<property>
<name>mapreduce.map.memory.mb</name>
<value>512</value>
</property>
<property>
<name>mapreduce.reduce.memory.mb</name>
<value>512</value>
</property>
</configuration>
17:修改yarn-site.xml文件
<configuration>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>master</value>
<description>ResourceManager所在節點</description>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
<description>MapReduce運行必需的shuffle服務</description>
</property>
<property>
<name>yarn.nodemanager.local-dirs</name>
<value>/data/hadoop/yarn/nodemanager</value>
</property>
<property>
<name>yarn.nodemanager.log-dirs</name>
<value>/data/hadoop/yarn/logs</value>
</property>
</configuration>
18:添加workers文件內容
slave1
slave2
19:初始化nameNode(master節點)
格式化:hdfs namenode -format
20:啓動hdfs和yarn
啓動HDFS:start-dfs.sh 啓動YARN:start-yarn.sh
21:驗證
hdfs dfs -ls / hdfs dfs -mkdir /user
##將本地文件上傳到hdfs文件系統目錄下
hdfs dfs -copyFromLocal <localFile> <hdfs dir>
hdfs dfs -copyFromLocal workers /user hdfs dfs -cat /user/workers
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.2.0.jar pi 5 10
22:訪問
hdfs: http://localhost:9870
yarn: http://localhost:8088