HBase and Hadoop Environment Setup and Client Access

I. Preparation

    1. Download Ubuntu Server: ubuntu-17.10.1-server-amd64.iso

        Download: https://www.ubuntu.com/download/server

    2. Download the JDK: jdk-8u144-linux-x64.tar.gz

        Download: http://www.oracle.com/technetwork/java/javase/downloads/java-archive-javase8-2177648.html

    3. Download Hadoop: hadoop-2.9.0.tar.gz

        Download: https://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/stable/

        User guide: https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/SingleCluster.html

    4. Download HBase: hbase-1.2.6-bin.tar.gz

        Download: https://mirrors.tuna.tsinghua.edu.cn/apache/hbase/stable/

        User guide: http://hbase.apache.org/book.html#quickstart_pseudo

    5. Install the Ubuntu Server system

        Download and install the latest Ubuntu Server release, ubuntu-17.10.1-server-amd64.iso, from https://www.ubuntu.com/download/server.

II. System Configuration

    1. Install net-tools

        $ sudo apt install net-tools

    2. Disable the firewall

        $ sudo ufw disable

    3. Configure SSH

        $ sudo apt-get install ssh

        Set up passwordless SSH login:

  $ ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
  $ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
  $ chmod 0600 ~/.ssh/authorized_keys
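
        To confirm that passwordless login works, the following should open a shell on localhost without prompting for a password:

  $ ssh localhost
  $ exit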

    4. Install rsync

        $ sudo apt-get install rsync

    5. Configure the system time

        $ sudo apt-get install ntpdate

        $ sudo tzselect

        

        Select Asia, then China, then Beijing Time when prompted.

        $ sudo cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime

        $ sudo ntpdate time.windows.com

    6. Install vim

        $ sudo apt-get install vim

    7. Configure the hosts file

        $ sudo vim /etc/hosts
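
        The Hadoop and HBase configuration below refers to this machine by the hostname master, so add an entry mapping that name to the server's address. A minimal example, assuming the IP 192.168.15.137 that appears later in this guide (substitute your own):

        192.168.15.137    master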


III. Configure Java

    $ mkdir local

    $ mkdir tmp

    $ cd local

    $ tar zxvf  jdk-8u144-linux-x64.tar.gz

    $ vim ../.profile

    export JAVA_HOME=/home/master/local/jdk1.8.0_144
    export JRE_HOME=${JAVA_HOME}/jre
    export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib

    export PATH=${JAVA_HOME}/bin:$PATH
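
    After saving the file, reload the profile and check that the JDK is picked up:

    $ source ~/.profile

    $ java -version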

IV. Configure Hadoop (pseudo-distributed)

    $ tar zxvf hadoop-2.9.0.tar.gz

    $ cd hadoop-2.9.0/etc/hadoop/

    $ vim core-site.xml

<configuration>
        <property>
                <name>fs.defaultFS</name>
                <value>hdfs://master:9000</value>
        </property>
        <property>
                <name>hadoop.tmp.dir</name>
                <value>/home/master/tmp</value>
        </property>

</configuration>
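
    fs.defaultFS is the NameNode address that clients (including HBase below) will use, and hadoop.tmp.dir points at the /home/master/tmp directory created earlier, which is where HDFS keeps its data.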

    $ vim hdfs-site.xml

<configuration>
        <property>
                <name>dfs.replication</name>
                <value>1</value>
        </property>
        <property>
                <name>dfs.namenode.hosts</name>
                <value>master</value>
        </property>
</configuration>
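
    dfs.replication is set to 1 because this is a single-node, pseudo-distributed setup: there is only one DataNode to hold each block.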

    $ vim hadoop-env.sh

        Add: export JAVA_HOME=/home/master/local/jdk1.8.0_144

    Switch to the home directory and edit .profile ($ vim .profile) to add the Hadoop environment settings:

        export HADOOP_HOME=/home/master/local/hadoop-2.9.0

        export PATH=${HADOOP_HOME}/sbin:$PATH


    Switch to the hadoop-2.9.0/bin/ directory and format the NameNode:

    $ ./hdfs namenode -format

    Start the Hadoop daemons:

    $ start-all.sh

    $ ./hdfs dfs -mkdir /hbase
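
    As a quick sanity check, jps should now list the HDFS and YARN daemons (NameNode, DataNode, SecondaryNameNode, ResourceManager, NodeManager):

    $ jps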

    NameNode web UI: http://192.168.15.137:50070


    YARN ResourceManager UI: http://192.168.15.137:8088/cluster


V. Configure HBase (pseudo-distributed)

    Switch to the local directory:

    $ tar -zxvf hbase-1.2.6-bin.tar.gz

    $ cd hbase-1.2.6/conf/

    $ vim hbase-site.xml

<configuration>

        <property>
                <name>hbase.cluster.distributed</name>
                <value>true</value>
        </property>
        <property>
                <name>hbase.rootdir</name>
                <value>hdfs://master:9000/hbase</value>
        </property>
        <property>
                <name>hbase.zookeeper.property.dataDir</name>
                <value>/home/master/tmp/zookeeper</value>
        </property>
        <property>
                <name>hbase.zookeeper.quorum</name>
                <value>master</value>
        </property>

</configuration>
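
    Note: hbase.rootdir must point at the same HDFS address as fs.defaultFS in core-site.xml (hdfs://master:9000 here), and hbase.cluster.distributed=true makes HBase store its data in that HDFS rather than on the local filesystem.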

    $ vim hbase-env.sh

    Add: export JAVA_HOME=/home/master/local/jdk1.8.0_144

    Switch to the home directory and edit .profile ($ vim .profile) to add the HBase environment settings:

    export HBASE_HOME=/home/master/local/hbase-1.2.6

    export PATH=${HBASE_HOME}/bin:$PATH

    $ start-hbase.sh

    Verify that HBase has created its directory tree under /hbase in HDFS:

    master@ubuntu:~/local/hadoop-2.9.0/bin$ ./hadoop fs -ls /hbase
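
    A quick check from the HBase shell should also work at this point, for example:

    $ hbase shell
    hbase(main):001:0> status
    hbase(main):002:0> list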


HBase web UI: http://192.168.15.137:16010/master-status



VI. Client Access

1. Modify the hosts file

 Add the line 192.168.15.137 master to C:\Windows\System32\drivers\etc\hosts.


2. pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
   xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>

   <groupId>com.mzw</groupId>
   <artifactId>fzy</artifactId>
   <version>0.0.1-SNAPSHOT</version>
   <packaging>jar</packaging>

   <name>fzy</name>
   <description>Demo project for Spring Boot</description>

   <parent>
      <groupId>org.springframework.boot</groupId>
      <artifactId>spring-boot-starter-parent</artifactId>
      <version>2.0.0.RELEASE</version>
      <relativePath/> <!-- lookup parent from repository -->
   </parent>

   <properties>
      <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
      <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
      <java.version>1.8</java.version>
      <spring-cloud.version>Finchley.M8</spring-cloud.version>
   </properties>

   <dependencies>
      <dependency>
         <groupId>org.springframework.cloud</groupId>
         <artifactId>spring-cloud-starter-netflix-eureka-server</artifactId>
      </dependency>

      <dependency>
         <groupId>org.springframework.boot</groupId>
         <artifactId>spring-boot-starter-test</artifactId>
         <scope>test</scope>
      </dependency>
      <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-common -->
      <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-common</artifactId>
         <version>2.9.0</version>
         <scope>provided</scope>
      </dependency>
      <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-client -->
      <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-client</artifactId>
         <version>2.9.0</version>
      </dependency>
      <!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-client -->
      <dependency>
         <groupId>org.apache.hbase</groupId>
         <artifactId>hbase-client</artifactId>
         <version>1.2.6</version>
      </dependency>
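      <!-- hbase-client 1.2.x is built against the Guava 12 API, so Guava is pinned to 12.0.1 below -->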
      <dependency>
         <groupId>com.google.guava</groupId>
         <artifactId>guava</artifactId>
         <version>12.0.1</version>
      </dependency>


   </dependencies>

   <dependencyManagement>
      <dependencies>
         <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-dependencies</artifactId>
            <version>${spring-cloud.version}</version>
            <type>pom</type>
            <scope>import</scope>
         </dependency>
      </dependencies>
   </dependencyManagement>

   <build>
      <plugins>
         <plugin>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-maven-plugin</artifactId>
         </plugin>
      </plugins>
   </build>

   <repositories>
      <repository>
         <id>spring-milestones</id>
         <name>Spring Milestones</name>
         <url>https://repo.spring.io/milestone</url>
         <snapshots>
            <enabled>false</enabled>
         </snapshots>
      </repository>
      <repository>
         <id>spring-maven2</id>
         <name>Spring maven2</name>
         <url>http://central.maven.org/maven2/</url>
         <snapshots>
            <enabled>false</enabled>
         </snapshots>
      </repository>
   </repositories>


</project>

3. Test class

package com.mzw.fzy;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;


public class SimpleClient {

    static final String rowKey = "row1";
    static HBaseAdmin hBaseAdmin;
    static Configuration conf;

    static {
        // Point the client at the ZooKeeper quorum of the pseudo-distributed cluster.
        conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.property.clientPort", "2181");
        conf.set("hbase.zookeeper.quorum", "192.168.15.137");
        try {
            hBaseAdmin = new HBaseAdmin(conf);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }


    public static void createTable(String tableName, String[] columns) throws Exception {
        dropTable(tableName);
        HTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);
        for (String columnName : columns) {
            HColumnDescriptor column = new HColumnDescriptor(columnName);
            hTableDescriptor.addFamily(column);
        }
        hBaseAdmin.createTable(hTableDescriptor);
        System.out.println("create table successed");
    }


    public static void dropTable(String tableName) throws Exception {
        if (hBaseAdmin.tableExists(tableName)) {
            hBaseAdmin.disableTable(tableName);
            hBaseAdmin.deleteTable(tableName);
        }
        System.out.println("drop table successed");
    }


    public static HTable getHTable(String tableName) throws Exception {
        return new HTable(conf, tableName);
    }


    public static void insert(String tableName, Map<String, String> map) throws Exception {
        HTable hTable = getHTable(tableName);
        byte[] row1 = Bytes.toBytes(rowKey);
        Put p1 = new Put(row1);
        for (String columnName : map.keySet()) {
            byte[] value = Bytes.toBytes(map.get(columnName));
            String[] str = columnName.split(":");
            byte[] family = Bytes.toBytes(str[0]);
            byte[] qualifier = null;
            if (str.length > 1) {
                qualifier = Bytes.toBytes(str[1]);
            }
            p1.add(family, qualifier, value);
        }
        hTable.put(p1);
        Get g1 = new Get(row1);
        Result result = hTable.get(g1);
        System.out.println("Get: " + result);
        System.out.println("insert successed");
    }


    public static void delete(String tableName, String rowKey) throws Exception {
        HTable hTable = getHTable(tableName);
        List<Delete> list = new ArrayList<Delete>();
        Delete d1 = new Delete(Bytes.toBytes(rowKey));
        list.add(d1);
        hTable.delete(list);
        Get g1 = new Get(Bytes.toBytes(rowKey));
        Result result = hTable.get(g1);
        System.out.println("Get: " + result);
        System.out.println("delete successed");
    }


    public static void selectOne(String tableName, String rowKey) throws Exception {
        HTable hTable = getHTable(tableName);
        Get g1 = new Get(Bytes.toBytes(rowKey));
        Result result = hTable.get(g1);
        foreach(result);
        System.out.println("selectOne end");
    }


    private static void foreach(Result result) throws Exception {
        for (KeyValue keyValue : result.raw()) {
            StringBuilder sb = new StringBuilder();
            sb.append(Bytes.toString(keyValue.getRow())).append("\t");
            sb.append(Bytes.toString(keyValue.getFamily())).append("\t");
            sb.append(Bytes.toString(keyValue.getQualifier())).append("\t");
            sb.append(keyValue.getTimestamp()).append("\t");
            sb.append(Bytes.toString(keyValue.getValue())).append("\t");
            System.out.println(sb.toString());
        }
    }


    public static void selectAll(String tableName) throws Exception {
        HTable hTable = getHTable(tableName);
        Scan scan = new Scan();
        ResultScanner resultScanner = null;
        try {
            resultScanner = hTable.getScanner(scan);
            for (Result result : resultScanner) {
                foreach(result);
            }
        }
        catch (Exception e) {
            e.printStackTrace();
        }
        finally {
            if (resultScanner != null) {
                resultScanner.close();
            }
        }
        System.out.println("selectAll end");
    }


    public static void main(String[] args) throws Exception {
        String tableName = "tableTest";
        String[] columns = new String[] { "column_A", "column_B" };
        createTable(tableName, columns);
        Map<String, String> map = new HashMap<String, String>();
        map.put("column_A", "AAA");
        map.put("column_B:1", "b1");
        map.put("column_B:2", "b2");
        insert(tableName, map);
        selectOne(tableName, rowKey);
        selectAll(tableName);
        delete(tableName, rowKey);
        dropTable(tableName);
    }

}
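
The HBaseAdmin/HTable classes used above still work but belong to the older client API (deprecated in the 1.x line). Below is a minimal sketch of the same connection setup using the Connection/Table API that hbase-client 1.2.6 also provides; the ConnectionClient class name is only for illustration, and it assumes the tableTest table created by SimpleClient already exists.

package com.mzw.fzy;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionClient {

    public static void main(String[] args) throws IOException {
        // Same ZooKeeper settings as in SimpleClient above.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.15.137");
        conf.set("hbase.zookeeper.property.clientPort", "2181");

        // Connection is the heavyweight, thread-safe object; Table is a lightweight handle to one table.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("tableTest"))) {

            // Write one cell: family column_B, qualifier "1", value "b1" under row1.
            Put put = new Put(Bytes.toBytes("row1"));
            put.addColumn(Bytes.toBytes("column_B"), Bytes.toBytes("1"), Bytes.toBytes("b1"));
            table.put(put);

            // Read the row back and print it.
            Result result = table.get(new Get(Bytes.toBytes("row1")));
            System.out.println("Get: " + result);
        }
    }
}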

