Operating System
Install the operating system
ubuntu-14.04.3-desktop-amd64.iso
Change the root password
sudo passwd root
Update the operating system
apt-get update
apt-get upgrade
Install SSH
apt-get install openssh-server
Set up passwordless SSH login
ssh-keygen -t rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
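To verify, the following should now log in without a password prompt (apart from a first-time host key confirmation, and assuming the key was generated with an empty passphrase):
ssh localhost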
Install JAVA
Extract the JDK archive to the target directory
jdk-7u79-linux-x64.tar.gz
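For example, a minimal sketch assuming the archive is in the current directory and /apps/java is the chosen install root (matching JAVA_HOME below):
mkdir -p /apps/java
tar -zxf jdk-7u79-linux-x64.tar.gz -C /apps/java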
Edit the environment variables
nano ~/.bashrc
------------------------------------------------------
export JAVA_HOME=/apps/java/jdk1.7.0_79
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$JAVA_HOME/bin:$PATH
------------------------------------------------------
Reload the environment variables and test the installation
source ~/.bashrc
java -version
Install PYTHON Components
Install pip
apt-get install python-pip
Install numpy
apt-get install python-numpy
Install scipy
apt-get install python-scipy
Install matplotlib
apt-get install python-matplotlib
Install BeautifulSoup
apt-get install python-bs4
Install feedparser
apt-get install python-feedparser
Install Scikit-Learn
Install scikit-learn
pip install -U scikit-learn
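As a quick optional check that the package imports (system Python 2 on Ubuntu 14.04):
python -c "import sklearn; print sklearn.__version__"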
Install Octave
Install octave
apt-get install octave
Install the Octave graphical interface
apt-get install qtoctave
Install SCALA
Extract the Scala archive to the target directory
scala-2.10.4.tgz
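For example, a minimal sketch assuming the archive is in the current directory and /apps/scala is the install root (matching SCALA_HOME below):
mkdir -p /apps/scala
tar -zxf scala-2.10.4.tgz -C /apps/scala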
Edit the environment variables
nano ~/.bashrc
------------------------------------------------------
export SCALA_HOME=/apps/scala/scala-2.10.4
export PATH=$JAVA_HOME/bin:$SCALA_HOME/bin:$PATH
------------------------------------------------------
Reload the environment variables and test the installation
source ~/.bashrc
scala -version
Install MYSQL
Install mysql
apt-get install mysql-server
Edit the /etc/mysql/my.cnf file
------------------------------------------------------
[mysqld]
......
# bind-address = 127.0.0.1
......
default-storage-engine = innodb
innodb_file_per_table
collation-server = utf8_general_ci
init-connect = 'SET NAMES utf8'
character-set-server = utf8
......
------------------------------------------------------
Restart the MySQL service
/etc/init.d/mysql restart
Secure the MySQL installation
mysql_secure_installation
Install HADOOP in Pseudo-Distributed Mode
Extract the Hadoop archive to the target directory
hadoop-2.7.1.tar.gz
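For example, a minimal sketch assuming the archive is in the current directory and /apps/hadoop is the install root (matching HADOOP_HOME below):
mkdir -p /apps/hadoop
tar -zxf hadoop-2.7.1.tar.gz -C /apps/hadoop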
Edit the environment variables
nano ~/.bashrc
------------------------------------------------------
export HADOOP_HOME=/apps/hadoop/hadoop-2.7.1
export PATH=$JAVA_HOME/bin:$SCALA_HOME/bin:$HADOOP_HOME/bin:$PATH
------------------------------------------------------
Reload the environment variables and test the installation
source ~/.bashrc
hadoop version
Go to the etc/hadoop folder under the Hadoop installation directory, edit the hadoop-env.sh file, set the JAVA_HOME variable explicitly, and save
------------------------------------------------------
# The java implementation to use.
export JAVA_HOME=/apps/java/jdk1.7.0_79
------------------------------------------------------
Edit the core-site.xml file
------------------------------------------------------
<configuration>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/apps_datas/hadoop/tmp</value>
</property>
<property>
<name>fs.defaultFS</name>
<value>hdfs://localhost:9000</value>
</property>
</configuration>
------------------------------------------------------
Edit the hdfs-site.xml file
------------------------------------------------------
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/apps_datas/hadoop/hdfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/apps_datas/hadoop/hdfs/data</value>
</property>
</configuration>
------------------------------------------------------
Copy mapred-site.xml.template and save it as mapred-site.xml, then edit it
cp mapred-site.xml.template mapred-site.xml
------------------------------------------------------
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>localhost:9001</value>
</property>
</configuration>
------------------------------------------------------
After configuration is complete, format the NameNode
hdfs namenode -format
Start Hadoop
/apps/hadoop/hadoop-2.7.1/sbin/start-all.sh
Check whether the Hadoop daemons started correctly; jps should typically show NameNode, DataNode, SecondaryNameNode, ResourceManager, and NodeManager in addition to Jps
jps
Open a browser to check Hadoop's status page
http://localhost:50070
Run an example job on the pseudo-distributed cluster to test whether the deployment succeeded
hdfs dfs -mkdir -p /user/hadoop/input
hdfs dfs -put /apps/hadoop/hadoop-2.7.1/etc/hadoop/*.xml input
hadoop jar /apps/hadoop/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar grep input output 'dfs[a-z.]+'
If the commands above run successfully and the corresponding files appear in HDFS, the pseudo-distributed Hadoop deployment works
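To inspect the job output directly (the relative output path resolves under the current user's HDFS home directory):
hdfs dfs -cat output/*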
Clean up the test files
hdfs dfs -rm -r /user/hadoop/output
hdfs dfs -rm -r /user/hadoop/input
Install HIVE
Log in to MySQL
mysql -u root -p
Create the hive database
create database hive;
Create a hive user and grant it full access to the hive database
grant all privileges on hive.* to 'hive'@'%' identified by 'hive';
Exit MySQL
exit;
Extract the Hive archive to the target directory
apache-hive-1.2.1-bin.tar.gz
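For example, a minimal sketch assuming the archive is in the current directory and /apps/hadoop is the install root (matching HIVE_HOME below):
tar -zxf apache-hive-1.2.1-bin.tar.gz -C /apps/hadoop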
Copy the MySQL JDBC driver into the lib folder under the Hive installation directory
cp mysql-connector-java-5.1.36-bin.jar /apps/hadoop/apache-hive-1.2.1-bin/lib/
Edit the environment variables
nano ~/.bashrc
------------------------------------------------------
export HIVE_HOME=/apps/hadoop/apache-hive-1.2.1-bin
export PATH=$JAVA_HOME/bin:$SCALA_HOME/bin:$HADOOP_HOME/bin:$HIVE_HOME/bin:$PATH
------------------------------------------------------
Reload the environment variables
source ~/.bashrc
Go to the conf folder under the Hive installation directory
cd /apps/hadoop/apache-hive-1.2.1-bin/conf
Copy hive-env.sh.template and save it as hive-env.sh
cp hive-env.sh.template hive-env.sh
Edit the hive-env.sh file
nano hive-env.sh
Set the HADOOP_HOME and HIVE_CONF_DIR values (HIVE_CONF_DIR should point at Hive's own conf directory)
------------------------------------------------------
......
HADOOP_HOME=/apps/hadoop/hadoop-2.7.1
......
export HIVE_CONF_DIR=/apps/hadoop/apache-hive-1.2.1-bin/conf
------------------------------------------------------
Copy hive-default.xml.template and save it as hive-site.xml
cp hive-default.xml.template hive-site.xml
Edit the hive-site.xml file and modify the following configuration items
nano hive-site.xml
------------------------------------------------------
......
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://localhost:3306/hive?createDatabaseIfNotExist=true</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>hive</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>hive</value>
</property>
<property>
<name>hive.exec.local.scratchdir</name>
<value>/apps_datas/hive/iotmp/user</value>
<description>Local scratch space for Hive jobs</description>
</property>
<property>
<name>hive.downloaded.resources.dir</name>
<value>/apps_datas/hive/iotmp/resources</value>
<description>Temporary local directory for added resources in the remote file system.</description>
</property>
<property>
<name>hive.querylog.location</name>
<value>/apps_datas/hive/iotmp/user</value>
<description>Location of Hive run time structured log file</description>
</property>
<property>
<name>hive.server2.logging.operation.log.location</name>
<value>/apps_datas/hive/iotmp/user/operation_logs</value>
<description>Top level directory where operation logs are stored if logging functionality is enabled</description>
</property>
......
------------------------------------------------------
Start HDFS (if it is not already running)
/apps/hadoop/hadoop-2.7.1/sbin/start-dfs.sh
Run Hive
hive
Install Spark (Standalone)
Extract the Spark archive to the target directory
spark-1.4.1-bin-hadoop2.6.tgz
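For example, a minimal sketch assuming the archive is in the current directory and /apps/spark is the install root (matching SPARK_HOME below):
mkdir -p /apps/spark
tar -zxf spark-1.4.1-bin-hadoop2.6.tgz -C /apps/spark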
Edit the environment variables
nano ~/.bashrc
------------------------------------------------------
export SPARK_HOME=/apps/spark/spark-1.4.1-bin-hadoop2.6
export PATH=$JAVA_HOME/bin:$SCALA_HOME/bin:$HADOOP_HOME/bin:$HIVE_HOME/bin:$SPARK_HOME/bin:$PATH
------------------------------------------------------
Reload the environment variables
source ~/.bashrc
Go to the conf folder under the Spark installation directory, copy the spark-env.sh.template file, and save it as spark-env.sh
cp spark-env.sh.template spark-env.sh
Edit the spark-env.sh file
nano spark-env.sh
------------------------------------------------------
export JAVA_HOME=/apps/java/jdk1.7.0_79
export SCALA_HOME=/apps/scala/scala-2.10.4
export HADOOP_CONF_DIR=/apps/hadoop/hadoop-2.7.1/etc/hadoop
export SPARK_MASTER_IP=localhost
export SPARK_MASTER_PORT=7077
export SPARK_WORKER_CORES=2
export SPARK_WORKER_INSTANCES=1
export SPARK_WORKER_MEMORY=2g
------------------------------------------------------
Copy the slaves.template file and save it as slaves
cp slaves.template slaves
Start Spark
/apps/spark/spark-1.4.1-bin-hadoop2.6/sbin/start-all.sh
Run jps to check whether the processes started; Master and Worker should now appear as well
jps
Run the bundled example
run-example org.apache.spark.examples.SparkPi
Open a browser to view the Spark web console
http://localhost:8080/
Start spark-shell
spark-shell
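As a quick optional smoke test inside spark-shell, a trivial job that counts the even numbers in a generated range (it should print 500):
sc.parallelize(1 to 1000).filter(_ % 2 == 0).count()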
View jobs and other information
http://localhost:4040
Install Tachyon (Standalone)
Extract the Tachyon archive to the target directory
tachyon-0.7.0-hadoop2.4-bin.tar.gz
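For example, a minimal sketch assuming the archive is in the current directory, /apps/tachyon is the install root, and the archive unpacks to tachyon-0.7.0 (matching TACHYON_HOME below):
mkdir -p /apps/tachyon
tar -zxf tachyon-0.7.0-hadoop2.4-bin.tar.gz -C /apps/tachyon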
Edit the environment variables
nano ~/.bashrc
------------------------------------------------------
export TACHYON_HOME=/apps/tachyon/tachyon-0.7.0
export PATH=$JAVA_HOME/bin:$SCALA_HOME/bin:$HADOOP_HOME/bin:$SPARK_HOME/bin:$TACHYON_HOME/bin:$PATH
------------------------------------------------------
Reload the environment variables
source ~/.bashrc
Go to the conf folder under the Tachyon installation directory, copy the tachyon-env.sh.template file, and save it as tachyon-env.sh
cp tachyon-env.sh.template tachyon-env.sh
Edit the tachyon-env.sh file
nano tachyon-env.sh
------------------------------------------------------
export JAVA_HOME=/apps/java/jdk1.7.0_79
export TACHYON_MASTER_ADDRESS=localhost
# export TACHYON_UNDERFS_ADDRESS=$TACHYON_HOME/underFSStorage
export TACHYON_UNDERFS_ADDRESS=hdfs://localhost:9000/tachyon
export TACHYON_WORKER_MEMORY_SIZE=2GB
export TACHYON_UNDERFS_HDFS_IMPL=org.apache.hadoop.hdfs.DistributedFileSystem
export TACHYON_WORKER_MAX_WORKER_THREADS=2048
export TACHYON_MASTER_MAX_WORKER_THREADS=2048
------------------------------------------------------
Start Hadoop (if it is not already running)
/apps/hadoop/hadoop-2.7.1/sbin/start-all.sh
Create a tachyon folder in HDFS
hdfs dfs -mkdir /tachyon
Format Tachyon
tachyon format
Start Tachyon
tachyon-start.sh local
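Optionally, run the bundled test programs to exercise basic Tachyon reads and writes:
tachyon runTests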
Open a browser and check whether Tachyon started successfully
http://localhost:19999/
At this point Tachyon and Hadoop work together. Next, integrate Tachyon with Spark; first stop Tachyon and Hadoop
tachyon-stop.sh
/apps/hadoop/hadoop-2.7.1/sbin/stop-all.sh
Go to the etc/hadoop folder under the Hadoop installation directory and edit core-site.xml, adding the following property inside the existing <configuration> element
nano /apps/hadoop/hadoop-2.7.1/etc/hadoop/core-site.xml
------------------------------------------------------
<configuration>
<property>
<name>fs.tachyon.impl</name>
<value>tachyon.hadoop.TFS</value>
</property>
</configuration>
------------------------------------------------------
Start HDFS
/apps/hadoop/hadoop-2.7.1/sbin/start-dfs.sh
Start Tachyon
tachyon-start.sh local
Start Spark
/apps/spark/spark-1.4.1-bin-hadoop2.6/sbin/start-all.sh
Start spark-shell
spark-shell
Store a file from HDFS into Tachyon, then read it back and count its lines
val s = sc.textFile("hdfs://localhost:9000/user/hadoop/derby.log")
s.saveAsTextFile("tachyon://localhost:19998/test")
val rdd = sc.textFile("tachyon://localhost:19998/test")
rdd.count()
If the steps above complete without errors, the Tachyon deployment succeeded