Direct HBase integration & fuzzy paginated queries with the HBase API


Previously I shared integrating HBase through spring-boot-data and integrating HBase directly. Today I'll share integrating HBase through a set of wrapper classes, and also talk a bit about querying HBase. Enough talk, straight to the code.
1. JAR dependencies

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-client</artifactId>
    <version>3.0.0</version>
    <exclusions>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
        <exclusion>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
        </exclusion>
        <exclusion>
            <groupId>javax.servlet</groupId>
            <artifactId>servlet-api</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>org.apache.hbase</groupId>
    <artifactId>hbase-shaded-client</artifactId>
    <version>2.1.0</version>
    <exclusions>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
        <exclusion>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>3.0.0</version>
    <exclusions>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
        <exclusion>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
        </exclusion>
    </exclusions>
</dependency>

The versions here can be changed as you like; my project is pinned to HBase 2.1 + Hadoop 3.0.
2. Configuration classes
HbaseClientProperties


import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * @author zhengwen
 */
@ConfigurationProperties(prefix = "hbase")
public class HbaseClientProperties {
    private Integer zkClientPort;
    private String zkQuorum;
    private String zkParentNode;
    private Integer rpcTimeout;
    private Integer retriesNumber;
    private Integer retriesPause;
    private Integer operationTimeout;
    private Integer scannerTimeout;
    private Integer poolCoreSize = 30;
    private Integer poolMaxSize = 100;
    private Long poolKeepAlive = 60L;

    private String[] zkClusterKey;

    private String[] restNode;

    private boolean tagUseMap;
    private Integer tagBatchOperationTimeout;
    private Integer tagBatchRpcTimeout;

    public Integer getZkClientPort() {
        return zkClientPort;
    }

    public void setZkClientPort(Integer zkClientPort) {
        this.zkClientPort = zkClientPort;
    }

    public String getZkQuorum() {
        return zkQuorum;
    }

    public void setZkQuorum(String zkQuorum) {
        this.zkQuorum = zkQuorum;
    }

    public Integer getRpcTimeout() {
        return rpcTimeout;
    }

    public void setRpcTimeout(Integer rpcTimeout) {
        this.rpcTimeout = rpcTimeout;
    }

    public Integer getRetriesNumber() {
        return retriesNumber;
    }

    public void setRetriesNumber(Integer retriesNumber) {
        this.retriesNumber = retriesNumber;
    }

    public Integer getOperationTimeout() {
        return operationTimeout;
    }

    public void setOperationTimeout(Integer operationTimeout) {
        this.operationTimeout = operationTimeout;
    }

    public Integer getPoolCoreSize() {
        return poolCoreSize;
    }

    public void setPoolCoreSize(Integer poolCoreSize) {
        this.poolCoreSize = poolCoreSize;
    }

    public Integer getPoolMaxSize() {
        return poolMaxSize;
    }

    public void setPoolMaxSize(Integer poolMaxSize) {
        this.poolMaxSize = poolMaxSize;
    }

    public Long getPoolKeepAlive() {
        return poolKeepAlive;
    }

    public void setPoolKeepAlive(Long poolKeepAlive) {
        this.poolKeepAlive = poolKeepAlive;
    }

    public String[] getRestNode() {
        return restNode;
    }

    public void setRestNode(String[] restNode) {
        this.restNode = restNode;
    }

    public String getZkParentNode() {
        return zkParentNode;
    }

    public void setZkParentNode(String zkParentNode) {
        this.zkParentNode = zkParentNode;
    }

    public Integer getScannerTimeout() {
        return scannerTimeout;
    }

    public void setScannerTimeout(Integer scannerTimeout) {
        this.scannerTimeout = scannerTimeout;
    }

    public Integer getRetriesPause() {
        return retriesPause;
    }

    public void setRetriesPause(Integer retriesPause) {
        this.retriesPause = retriesPause;
    }

    public boolean isTagUseMap() {
        return tagUseMap;
    }

    public void setTagUseMap(boolean tagUseMap) {
        this.tagUseMap = tagUseMap;
    }

    public Integer getTagBatchOperationTimeout() {
        return tagBatchOperationTimeout;
    }

    public void setTagBatchOperationTimeout(Integer tagBatchOperationTimeout) {
        this.tagBatchOperationTimeout = tagBatchOperationTimeout;
    }

    public Integer getTagBatchRpcTimeout() {
        return tagBatchRpcTimeout;
    }

    public void setTagBatchRpcTimeout(Integer tagBatchRpcTimeout) {
        this.tagBatchRpcTimeout = tagBatchRpcTimeout;
    }

    public String[] getZkClusterKey() {
        return zkClusterKey;
    }

    public void setZkClusterKey(String[] zkClusterKey) {
        this.zkClusterKey = zkClusterKey;
    }
}

HbaseClientConfiguration


import com.alibaba.ttl.threadpool.TtlExecutors;
import com.fillersmart.g20.dc.data.api.hbase.ConnectionManager;
import com.fillersmart.g20.dc.data.api.hbase.HbaseTemplate;
import com.google.common.io.Resources;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.security.UserGroupInformation;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/**
 * @author zhangyw
 * @date 2019/7/31 17:33
 */
@Configuration
@EnableConfigurationProperties(HbaseClientProperties.class)
public class HbaseClientConfiguration {

    /**
     * Initialize the HBase read thread pool
     *
     * @param hbaseClientProperties prop
     * @return executor
     * @see HConstants
     */
    @Bean(name = "hbaseReadPool", destroyMethod = "shutdown")
    public ExecutorService hbaseReadPool(HbaseClientProperties hbaseClientProperties) {
        ThreadFactoryBuilder threadFactoryBuilder =
                new ThreadFactoryBuilder()
                        .setDaemon(true)
                        .setNameFormat("hbase-read-%d");

        ThreadPoolExecutor executor =
                new ThreadPoolExecutor(
                        hbaseClientProperties.getPoolCoreSize(),
                        hbaseClientProperties.getPoolMaxSize(),
                        hbaseClientProperties.getPoolKeepAlive(),
                        TimeUnit.SECONDS, new SynchronousQueue<>(),
                        threadFactoryBuilder.build());
        // init pool
        executor.prestartCoreThread();

//        return executor;
        return TtlExecutors.getTtlExecutorService(executor);
    }

    @Bean
    public ConnectionManager connectionManager(
            HbaseClientProperties hbaseClientProperties,
            ExecutorService hbaseReadPool) throws IOException {

        org.apache.hadoop.conf.Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.ZOOKEEPER_QUORUM, hbaseClientProperties.getZkQuorum());
        conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, hbaseClientProperties.getZkClientPort());
        conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, hbaseClientProperties.getZkParentNode());
        conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, hbaseClientProperties.getRpcTimeout());
        conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, hbaseClientProperties.getRetriesNumber());
        conf.setInt(HConstants.HBASE_CLIENT_PAUSE, hbaseClientProperties.getRetriesPause());
        conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, hbaseClientProperties.getOperationTimeout());
        conf.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, hbaseClientProperties.getScannerTimeout());
        conf.addResource(Resources.getResource("hbase-site.xml"));
        // IPC configuration
//        conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "");
//        conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 10);

        User user = User.create(UserGroupInformation.createRemoteUser("bigdata"));

        ConnectionManager connectionManager = new ConnectionManager();

        if (hbaseClientProperties.getZkClusterKey() != null) {
            for (String clusterKey : hbaseClientProperties.getZkClusterKey()) {
                connectionManager.addConnection(
                        clusterKey,
                        ConnectionFactory
                                .createConnection(
                                        HBaseConfiguration.createClusterConf(conf, clusterKey),
                                        hbaseReadPool, user));
            }
        } else {
            connectionManager.addConnection("master", ConnectionFactory.createConnection(conf));
        }

        return connectionManager;
    }

    @Bean(name = "hbaseTemplate")
    public HbaseTemplate hbaseTemplate(ConnectionManager connectionManager) throws IOException {
        return new HbaseTemplate(connectionManager);
    }

}

3. Configuration files
The YAML file


hbase:
  zkClientPort: 2181
  zkQuorum: bigdata03,bigdata01,bigdata02
  zkParentNode: /hbase
  rpcTimeout: 60000
  retriesNumber: 10
  retriesPause: 100
  operationTimeout: 1200000
  scannerTimeout: 60000
  poolCoreSize: 30
  poolMaxSize: 100
  poolKeepAlive: 60
  properties:
    query.limit: 500
    img.url.prefix: http://192.168.200.77
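
The extra properties block is not an HBase client setting; query.limit and img.url.prefix are read back by the query implementation further down (the queryLimit and imgUrlPrefix fields). A minimal sketch of how they could be injected, assuming standard Spring @Value binding and that the YAML keys flatten to hbase.properties.query.limit and hbase.properties.img.url.prefix (the holder class name is made up for illustration):

import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

@Component
public class HbaseQueryConfigHolder {   // illustrative name, not from the original project

    /** maximum rows allowed per page, bound from hbase.properties.query.limit */
    @Value("${hbase.properties.query.limit:500}")
    private Integer queryLimit;

    /** prefix prepended to image paths, bound from hbase.properties.img.url.prefix */
    @Value("${hbase.properties.img.url.prefix}")
    private String imgUrlPrefix;
}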

hbase-site.xml
Any additional configuration goes into this XML file. You can ask your ops team for the copy under the HBase installation directory, or write it by hand; where a setting here differs from the installed configuration, the values here take effect for this client's connection and override the originals.
4. The wrapper classes
ConnectionManager


import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.scheduling.annotation.Scheduled;

import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * HBase connection manager; supports switching between multiple connections
 */
public class ConnectionManager implements InitializingBean, DisposableBean {

    private static Logger log = LoggerFactory.getLogger(ConnectionManager.class);

    private final AtomicBoolean running = new AtomicBoolean(false);

    private static Map<String, Connection> connMap = new LinkedHashMap<>();

    private static Connection master;

    private static String masterClusterKey;

    public void addConnection(String clusterKey, Connection conn) {
        connMap.put(clusterKey, conn);
    }

    @Scheduled(initialDelayString = "30000", fixedDelayString = "10000")
    public void watchHbaseServer() {
        if (this.running.get()) {
            if (log.isDebugEnabled())
                log.debug("hbase server start checking ...");

            Connection m = null;
            String clusterKey = null;
            for (Map.Entry<String, Connection> entry : connMap.entrySet()) {

                clusterKey = entry.getKey();
                if (log.isDebugEnabled())
                    log.debug("checking {} connection", clusterKey);
                if (checkConn(clusterKey, entry.getValue())) {
                    if (log.isDebugEnabled())
                        log.debug("hbase server {} is available ", clusterKey);
                    m = entry.getValue();
                    break;
                }

                log.debug("hbase server {} is unavailable, check next", clusterKey);
            }

            if (m == null) {
                log.error("no available hbase server can use, please check");
            } else if (!masterClusterKey.equalsIgnoreCase(clusterKey)) { // the master changed, update the in-memory reference
                master = m;
                masterClusterKey = clusterKey;
                if (log.isDebugEnabled()) {
                    log.debug("hbase server changed to {} ", clusterKey);
                }
            }

            if (log.isDebugEnabled()) {
                log.debug("hbase server checked over, use {} connection ", masterClusterKey);
            }
        }
    }

    /**
     * Check whether a connection is usable
     *
     * @param clusterKey cluster key
     * @param conn       connection
     * @return true or false
     */
    private boolean checkConn(String clusterKey, Connection conn) {
        try {
            return ((ClusterConnection) conn).isMasterRunning();
        } catch (IOException e) {
            log.debug("hbase server {} has down", clusterKey);
            return false;
        }
    }

    public Connection connect() {
        return master;
    }


    public void close() {
//        for (Map.Entry<String, Connection> entry : connMap.entrySet()) {
//            entry.getValue().close();
//        }

        connMap.forEach((key, con) -> {
            try {
                con.close();
            } catch (IOException e) {
                log.error("close connection", e);
            }
        });

        this.running.compareAndSet(true, false);
    }

    @Override
    public void destroy() throws Exception {
        this.close();
    }

    @Override
    public void afterPropertiesSet() throws Exception {
        for (Map.Entry<String, Connection> entry : connMap.entrySet()) {
            if (checkConn(entry.getKey(), entry.getValue())) {
                master = entry.getValue();
                masterClusterKey = entry.getKey();
                log.info("use hbase server {}", entry.getKey());
                break;
            }
        }

        if (master == null) {
            //throw new IllegalStateException("no available hbase connection can use, please check");
            log.error("no available hbase connection can use, please check");
        }

        this.running.compareAndSet(false, true);
    }
}
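
One caveat with the ConnectionManager above: the @Scheduled health check only runs if scheduled-task support is enabled somewhere in the Spring context. A minimal sketch (the application class name is illustrative):

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.scheduling.annotation.EnableScheduling;

/**
 * Enables Spring's scheduling support so that
 * ConnectionManager.watchHbaseServer() actually fires every 10 seconds.
 */
@EnableScheduling
@SpringBootApplication
public class DataApiApplication {   // illustrative name

    public static void main(String[] args) {
        SpringApplication.run(DataApiApplication.class, args);
    }
}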

HbaseTemplate


import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.Assert;
import org.springframework.util.StopWatch;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * Simple template that wraps HBase Table access through the {@link ConnectionManager}.
 */
public class HbaseTemplate {

    private static final Logger LOGGER = LoggerFactory.getLogger(HbaseTemplate.class);

    private ConnectionManager connectionManager;

    public HbaseTemplate() {
    }

    public HbaseTemplate(ConnectionManager connectionManager) {
        this.connectionManager = connectionManager;
    }

    /**
     * Execute an HBase action
     *
     * @param tableName        table name
     * @param operationTimeout operation timeout in ms
     * @param action           the callback to run
     * @param <T>              return type
     * @return T
     */
    public <T> T execute(String tableName, int operationTimeout, TableCallback<T> action) {
        Assert.notNull(action, "Callback object must not be null");
        Assert.notNull(tableName, "No table specified");

        StopWatch sw = new StopWatch();
        sw.start();
        Table table = null;
        try {
            Connection connection = this.getConnection();
            table = connection.getTable(TableName.valueOf(tableName));

            if (table instanceof HTable) {
                if (operationTimeout > 0) {
                    ((HTable) table).setOperationTimeout(operationTimeout);
                } else {
                    ((HTable) table).setOperationTimeout(
                            connection.getConfiguration()
                                    .getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
                                            HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT));
                }
            }

            return action.doInTable(table);
        } catch (Throwable throwable) {
            throw new HbaseSystemException(throwable);
        } finally {
            if (null != table) {
                try {
                    table.close();
                    sw.stop();
                } catch (IOException e) {
                    LOGGER.error("failed to release HBase table resource", e);
                }
            }
        }
    }

    public <T> T execute(String tableName, TableCallback<T> action) {
        return execute(tableName, 0, action);
    }

    public <T> List<T> find(String tableName, String family, final RowMapper<T> action) {
        Scan scan = new Scan();
        scan.setCaching(5000);
        scan.addFamily(Bytes.toBytes(family));
        return this.find(tableName, scan, action);
    }

    public <T> List<T> find(String tableName, String family, String qualifier, final RowMapper<T> action) {
        Scan scan = new Scan();
        scan.setCaching(5000);
        scan.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier));
        return this.find(tableName, scan, action);
    }

    public <T> List<T> find(String tableName, final Scan scan, final RowMapper<T> action) {
        return this.execute(tableName, new TableCallback<List<T>>() {
            @Override
            public List<T> doInTable(Table table) throws Throwable {
                int caching = scan.getCaching();
                // if caching was not set (the default is 1), default it to 5000
                if (caching == 1) {
                    scan.setCaching(5000);
                }
                try (ResultScanner scanner = table.getScanner(scan)) {
                    List<T> rs = new ArrayList<T>();
                    int rowNum = 0;
                    for (Result result : scanner) {
                        rs.add(action.mapRow(result, rowNum++));
                    }
                    return rs;
                }
            }
        });
    }


    public <T> T get(String tableName, String rowName, final RowMapper<T> mapper) {
        return this.get(tableName, rowName, null, null, mapper);
    }

    public <T> T get(String tableName, String rowName, String familyName, final RowMapper<T> mapper) {
        return this.get(tableName, rowName, familyName, null, mapper);
    }

    public <T> T get(String tableName, final String rowName, final String familyName, final String qualifier, final RowMapper<T> mapper) {
        Get get = new Get(Bytes.toBytes(rowName));
        if (StringUtils.isNotBlank(familyName)) {
            byte[] family = Bytes.toBytes(familyName);
            if (StringUtils.isNotBlank(qualifier)) {
                get.addColumn(family, Bytes.toBytes(qualifier));
            } else {
                get.addFamily(family);
            }
        }

        return this.get(tableName, get, mapper);
    }

    public <T> T get(String tableName, Get get, final RowMapper<T> mapper) {
        return this.execute(tableName, table -> {
            Result result = table.get(get);
            return mapper.mapRow(result, 0);
        });

    }

    public Connection getConnection() {
        return this.connectionManager.connect();
    }

}
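
The execute method above rethrows any failure as an HbaseSystemException, which the post doesn't list. A minimal sketch, assuming it is just an unchecked wrapper around the original cause (the real class, adapted from spring-data-hadoop, may carry more context):

/**
 * Unchecked exception used by HbaseTemplate to rethrow errors raised inside HBase callbacks.
 */
public class HbaseSystemException extends RuntimeException {

    public HbaseSystemException(Throwable cause) {
        super(cause.getMessage(), cause);
    }
}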

RowMapper


import org.apache.hadoop.hbase.client.Result;

/**
 * Callback for mapping rows of a {@link ResultScanner} on a per-row basis.
 * Implementations of this interface perform the actual work of mapping each row to a result object, but don't need to worry about exception handling.
 *
 * @author Costin Leau
 */

/**
 * JThink@JThink
 *
 * @author JThink
 * @version 0.0.1
 * desc: copy from spring data hadoop hbase, modified by JThink, use the 1.0.0 api
 * date: 2016-11-15 15:42:46
 */
public interface RowMapper<T> {

    T mapRow(Result result, int rowNum) throws Exception;
}

TableCallback


import org.apache.hadoop.hbase.client.Table;

/**
 * Callback interface for Hbase code. To be used with {@link HbaseTemplate}'s execution methods, often as anonymous classes within a method implementation without
 * having to worry about exception handling.
 *
 * @author Costin Leau
 */

/**
 * JThink@JThink
 *
 * @author JThink
 * @version 0.0.1
 * desc: copy from spring data hadoop hbase, modified by JThink, use the 1.0.0 api
 * date: 2016-11-15 14:49:52
 */
public interface TableCallback<T> {

    /**
     * Gets called by {@link HbaseTemplate} execute with an active Hbase table. Does need to care about activating or closing down the table.
     *
     * @param table active Hbase table
     * @return a result object, or null if none
     * @throws Throwable thrown by the Hbase API
     */
    T doInTable(Table table) throws Throwable;
}

Those are basically the four wrapper classes; they should be enough, and you can extend them yourself if they aren't.
5. Usage
Inject HbaseTemplate and you can use the methods it exposes. The fuzzy paginated query I built here doesn't actually use the template methods; I wrote it against the native API with a Scan plus a FilterList. Originally there were two possible mechanisms. The first takes a page number, page size, filter conditions and a rowkey range (the conditions that rowkeys are generated from), but just like deep paging in MySQL, the later the page the slower the query, so I dropped it. The replacement is a streaming fuzzy paginated query. The idea is much the same, but it is driven by the start rowkey: each page returns the first and last rowkey of its data, and the last rowkey becomes the start rowkey of the next page. I can't expose our rowkey generation method here, so I'll just cover the key points:
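
For the plain template methods, usage is straightforward; a small sketch (the table name "demo_table", family "f" and column "name" are made-up placeholders, and HbaseTemplate is assumed to be on the classpath from the wrapper classes above):

import org.apache.hadoop.hbase.util.Bytes;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

@Service
public class DemoQueryService {   // illustrative

    @Autowired
    private HbaseTemplate hbaseTemplate;

    /** Read a single cell of one row; table, family and qualifier names are placeholders. */
    public String findName(String rowKey) {
        return hbaseTemplate.get("demo_table", rowKey, "f", "name",
                (result, rowNum) -> Bytes.toString(
                        result.getValue(Bytes.toBytes("f"), Bytes.toBytes("name"))));
    }
}
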
Input parameter object: QueryParam


import lombok.Data;

import java.util.List;

/**
 * @author zhengwen
 **/
@Data
public class QueryParam {

    /**
     * Table code
     */
    private String tableCode;

    /**
     * Device code
     */
    private String deviceCode;

    /**
     * Start time, format: yyyy-MM-dd HH:mm:ss
     */
    private String startTime;

    /**
     * End time, format: yyyy-MM-dd HH:mm:ss
     */
    private String endTime;

    /**
     * Filter conditions
     */
    private List<QueryFilter> filterList;

    /**
     * Paging info
     */
    private PageInfoParam pageInfoParam;


}

QueryFilter

import lombok.Data;

/**
 * @author zhengwen
 **/
@Data
public class QueryFilter {
    /**
     * Filter column name
     */
    private String filterCol;
    /**
     * Filter value
     */
    private String filterVal;
    /**
     * Filter relation (comparison operator code)
     */
    private Integer filterRel;


}

PageInfoParam


import lombok.Data;

/**
 * @author zhengwen
 **/
@Data
public class PageInfoParam {
    /**
     * Start rowkey
     */
    private String startRowKey;
    /**
     * End rowkey
     */
    private String endRowKey;

    /**
     * Page number
     */
    private Integer pageNum;

    /**
     * Page size
     */
    private Integer pageSize;

    /**
     * Whether there is a next page
     */
    private boolean hasNext;

}

Query method in the implementation class

 @Override
    public List<?> fuzzyQueryRecord(QueryParam queryParam) {
        //table name
        String tableName = TableUtil.getTableNameBy(queryParam.getTableCode(), redisUtil);
        List<Map<String, String>> dataList = null;

        //get the Scan object
        Scan scan = QueryParamUtil.initGetScan();

        //set the rowkey range
        QueryParamUtil.setRowKeyRange(scan, queryParam);

        //set the filter conditions
        QueryParamUtil.addScanFilter(scan, queryParam);

        //set up the streaming query
        QueryParamUtil.addStreamQuery(scan, queryParam, queryLimit);

        //try-with-resources so the Table and ResultScanner are always closed
        try (Table table = hbaseTemplate.getConnection().getTable(TableName.valueOf(tableName));
             ResultScanner scanner = table.getScanner(scan)) {
            dataList = HbasePageUtil.getResultScannerData(scanner);
        } catch (IOException e) {
            log.error("----fuzzy record query error: {}", e.getMessage(), e);
            return null;
        }
        //prepend the image URL prefix
        HbaseDataUtil.addImgUrlPrefix(dataList, imgUrlPrefix);
        log.debug("----query result: {}", JSON.toJSONString(dataList));

        return dataList;
    }
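
HbasePageUtil.getResultScannerData isn't shown in the post; a minimal sketch of what it could look like, assuming it simply flattens every cell of each row into a map keyed by column qualifier, plus the rowkey:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.util.Bytes;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class HbasePageUtil {

    /** Convert every scanned row into a Map of qualifier -> value, keeping the rowkey. */
    public static List<Map<String, String>> getResultScannerData(ResultScanner scanner) {
        List<Map<String, String>> dataList = new ArrayList<>();
        for (Result result : scanner) {
            Map<String, String> row = new HashMap<>();
            row.put("rowKey", Bytes.toString(result.getRow()));
            for (Cell cell : result.rawCells()) {
                String qualifier = Bytes.toString(CellUtil.cloneQualifier(cell));
                String value = Bytes.toString(CellUtil.cloneValue(cell));
                row.put(qualifier, value);
            }
            dataList.add(row);
        }
        return dataList;
    }
}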

The QueryParamUtil utility class
Sorry, I can't share the rowkey generation method; every company will have its own generation rules based on its own business, and mine is nothing special. This class contains the key utility methods the fuzzy query relies on: obtaining the Scan, building the FilterList, and the streaming logic (the previous query's end rowkey becomes this query's start rowkey). A small sketch of driving the pagination from the caller's side follows after the class.


import com.fillersmart.g20.dc.core.result.Result;
import com.fillersmart.g20.dc.core.result.ResultGenerator;
import com.fillersmart.g20.dc.core.util.HbaseRowKeyUtil;
import com.fillersmart.g20.dc.data.api.constant.Constant;
import com.fillersmart.g20.dc.data.api.constant.FilterRelEnum;
import com.fillersmart.g20.dc.data.api.model.PageInfoParam;
import com.fillersmart.g20.dc.data.api.model.QueryFilter;
import com.fillersmart.g20.dc.data.api.model.QueryParam;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.springframework.util.CollectionUtils;

import java.util.Date;
import java.util.List;

/**
 * Utility class for building HBase query parameters
 * @author zhengwen
 **/
@Slf4j
public class QueryParamUtil {
   

    /**
     * Initialize and return the Scan; Scan-level tuning is handled centrally here
     * @return the Scan object
     */
    public static Scan initGetScan() {
        Scan scan = new Scan();
        //scan caching on the HBase side
        //scan.setCaching(50000);
        //amount of data per scan batch
        //scan.setBatch(1000);

        return scan;
    }

    /**
     * Add filter conditions to the Scan
     * @param scan the Scan object
     * @param queryParam query condition object
     */
    public static void addScanFilter(Scan scan, QueryParam queryParam) {
        //parse the query conditions
        List<QueryFilter> queryFilterList = queryParam.getFilterList();
        if (!CollectionUtils.isEmpty(queryFilterList)){
            //multiple filters are combined with AND
            FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
            for (QueryFilter fit:queryFilterList){
                String fitCol = fit.getFilterCol();
                String fitVal = fit.getFilterVal();
                Integer fitRel = fit.getFilterRel();
                addScanFilterCol(filterList,fitCol,fitVal,fitRel);
            }
            scan.setFilter(filterList);
        }

    }

    /**
     * Add a column filter to the filter list
     * @param filterList the filter list
     * @param fitCol column name
     * @param fitVal filter value
     * @param fitRel comparison relation
     */
    private static void addScanFilterCol(FilterList filterList, String fitCol, String fitVal, Integer fitRel) {
        if (filterList == null || StringUtils.isAnyBlank(fitCol, fitVal) || fitRel == null) {
            log.debug("add scan filter column failed, a parameter is null");
        } else {
            byte[] family = Bytes.toBytes(Constant.COL_FAMILY);
            byte[] qualifier = Bytes.toBytes(fitCol);
            byte[] fitValBytes = Bytes.toBytes(fitVal);

            SingleColumnValueFilter scvf = null;
            //equals
            if (FilterRelEnum.FILTER_EQ.getRelCode().equals(fitRel)) {
                BinaryComparator comp = new BinaryComparator(fitValBytes);
                scvf = new SingleColumnValueFilter(family, qualifier, CompareFilter.CompareOp.EQUAL, comp);
            }
            //like (substring match)
            if (FilterRelEnum.FILTER_LIKE.getRelCode().equals(fitRel)) {
                SubstringComparator comp = new SubstringComparator(fitVal);
                //RegexStringComparator comp = new RegexStringComparator(fitVal);
                scvf = new SingleColumnValueFilter(family, qualifier, CompareFilter.CompareOp.EQUAL, comp);
            }
            //TODO other filter relations
            if (scvf == null) {
                log.debug("unsupported filter relation: {}", fitRel);
                return;
            }
            //important: rows that do not contain this column at all are filtered out
            scvf.setFilterIfMissing(true);
            filterList.addFilter(scvf);
        }
    }

   

    /**
     * Set up the streaming query; in practice this just sets the start rowkey and the row limit
     * @param scan the Scan object
     * @param queryParam query parameter object
     * @param queryLimit upper bound on rows per query
     */
    public static void addStreamQuery(Scan scan, QueryParam queryParam, Integer queryLimit) {
        PageInfoParam pageInfoParam = queryParam.getPageInfoParam();
        String endRowKey = pageInfoParam.getEndRowKey();
        Integer pageSize = pageInfoParam.getPageSize();
        if (pageSize != null){
            if (pageSize.intValue() > queryLimit.intValue()){
                scan.setLimit(queryLimit + 1);
            }else {
                scan.setLimit(pageSize + 1);
            }
        }
        //the previous end rowkey becomes this query's start rowkey
        if (StringUtils.isNotBlank(endRowKey)){
            scan.setStartRow(Bytes.toBytes(endRowKey));
        }

    }

    
}
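
Finally, a sketch of how a caller could drive the streaming pagination end to end. The service interface name (HbaseRecordService) and the way the last rowkey of a page is obtained are assumptions; the post only states that each page returns its first and last rowkey and that the last one becomes the start of the next page:

import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

@Component
public class FuzzyPageQueryDemo {   // illustrative

    // HbaseRecordService is an assumed interface exposing fuzzyQueryRecord(QueryParam)
    @Autowired
    private HbaseRecordService recordService;

    public void pageThrough() {
        QueryParam queryParam = new QueryParam();
        queryParam.setTableCode("demo_table_code");   // illustrative value
        queryParam.setDeviceCode("demo_device");      // illustrative value

        PageInfoParam pageInfoParam = new PageInfoParam();
        pageInfoParam.setPageNum(1);
        pageInfoParam.setPageSize(20);
        queryParam.setPageInfoParam(pageInfoParam);

        // Page 1: no endRowKey yet, so the scan starts from the rowkey range built by setRowKeyRange
        List<?> firstPage = recordService.fuzzyQueryRecord(queryParam);

        // Page 2: feed the last rowkey of page 1 back in as endRowKey;
        // addStreamQuery() uses it as the start row of the next scan.
        String lastRowKeyOfFirstPage = "...";          // taken from the page-1 result in practice
        pageInfoParam.setPageNum(2);
        pageInfoParam.setEndRowKey(lastRowKeyOfFirstPage);
        List<?> secondPage = recordService.fuzzyQueryRecord(queryParam);
    }
}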

That's all for this share. The HBase API side really comes down to two methods: one queries records, the other queries the latest record. Querying is just a matter of using Scan, Filter and the various value comparators well. The rest is up to you to explore; thanks, everyone.
