HBase Shell命令(1.2官方文檔)

hbase shell 主要分類有6種
  1. General HBase shell commands
    1. 查看集羣狀態

      #status後面可以跟不同的參數
      status
      status 'simple'
      status 'summary' //默認的
      status 'detailed'
      hbase(main):015:0> status
      1 active master, 1 backup masters, 3 servers, 0 dead, 1.3333 average load
    2. 查看版本信息

      hbase(main):016:0> version
      1.2.4, r67592f3d062743907f8c5ae00dbbe1ae4f69e5af, Tue Oct 25 18:10:20 CDT 2016
    3. 查看當前用戶

      hbase(main):001:0> whoami
      hadoop (auth:SIMPLE)
      groups: hadoop
  2. Tables Management commands

    1. alter

      當一個alter操作有些賦值錯誤時,會卡住,退出重進
      當hbase.online.schema.update.enable屬性設置爲false的時候,alter之前需要先disable操作
      通過表名和一個dictionary指定一個新的column family
      
      #保存cell的5個最新版本
      
      hbase> alter 't1', NAME => 'f1', VERSIONS => 5
      
      
      #增加一個column family===》增加‘f2’
      
      hbase(main):015:0> alter 't1', NAME => 'f2'
      
      
      #用花括號分割,操作一個表的幾個column family
      
      alter 't1',{NAME=>'f1',VERSIONS=>2},{NAME=>'f2',IN_MEMORY=>true}
      
      
      #刪除表t1的f1 列族
      
      hbase(main):004:0> alter 't1','delete'=>'f1'
      
      
      #更改表範圍屬性 MAX_FILESIZE, READONLY,MEMSTORE_FLUSHSIZE, DEFERRED_LOG_FLUSH等
      
      hbase> alter 't1', MAX_FILESIZE => '134217728'
      
      #完整版
      
      alter 't1',METHOD=>'table_att',MAX_FILESIZE=>'134217728'
      
      
      #通過設置表的coprocesser屬性來爲一個表指定協處理器
      
      hbase> alter 't1','coprocessor'=>'[coprocessor jar file location] | class name | [priority] | [arguments]'
      hbase> alter 't1','coprocessor'=>'hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2'
      
      #當一個表有很多協處理器時,a sequence number 會自動追加到屬性名上來標識他們
      
      
      
      #爲某一個表或者某一個cf指定configuration
      
      hbase> alter 't1',CONFIGURATION=>{'hbase.hregion.scan.loadColumnFamiliesOnDemand'=>'true'}
      hbase> alter 't1', {NAME => 'f2', CONFIGURATION => {'hbase.hstore.blockingStoreFiles' => '10'}}
      
      
      #移除表範圍屬性
      
      hbase> alter 't1', METHOD => 'table_att_unset', NAME => 'MAX_FILESIZE'
      
      hbase> alter 't1', METHOD => 'table_att_unset', NAME => 'coprocessor$1'
      
      
      #設置region備份
      
       hbase> alter 't1', {REGION_REPLICATION => 2}
      
      
      #多個alter操作寫在一起
      
      hbase> alter 't1', { NAME => 'f1', VERSIONS => 3 }, { MAX_FILESIZE => '134217728' }, { METHOD => 'delete', NAME => 'f2' },OWNER => 'johndoe', METADATA => { 'mykey' => 'myvalue' }
      
    2. create

      建表命令
      需要的參數:表名、列族名們(至少一個)
      可選的參數:表配置、列配置、region配置
      未指定namespace時,創建在default空間下
      
      hbase> create 'ns1:t1', {NAME => 'f1', VERSIONS => 5}
      
      hbase> create 't1', {NAME => 'f1'}, {NAME => 'f2'}, {NAME => 'f3'}
      
      #上面的命令可以簡寫成下面的樣子
      
      hbase> create 't1', 'f1', 'f2', 'f3'
      
      #表配置選項可以追加到上面的命令後
      
        hbase> create 'ns1:t1', 'f1', SPLITS => ['10', '20', '30', '40']
        hbase> create 't1', 'f1', SPLITS => ['10', '20', '30', '40']
        hbase> create 't1', 'f1', SPLITS_FILE => 'splits.txt', OWNER => 'johndoe'
        hbase> create 't1', {NAME => 'f1', VERSIONS => 5}, METADATA => { 'mykey' => 'myvalue' }
      
      #預切割表
      
      hbase> create 't1', 'f1', {NUMREGIONS => 15, SPLITALGO => 'HexStringSplit'}
      hbase> create 't1', 'f1', {NUMREGIONS => 15, SPLITALGO => 'HexStringSplit', REGION_REPLICATION => 2, CONFIGURATION => {'hbase.hregion.scan.loadColumnFamiliesOnDemand' => 'true'}}
      hbase> create 't1', {NAME => 'f1', DFS_REPLICATION => 1}
      
      
      #引用
      
      hbase> t1 = create 't1', 'f1'
    3. describe

      
      #完整的命令
      
      hbase> describe 'ns1:t1'
      
      #describe可以簡寫成desc
      
    4. disable

      hbase> disable 'ns1:t1'
    5. disable_all

      可以匹配正則
      hbase(main):005:0> disable_all '.*1'
      t1
    6. is_disabled

      hbase(main):006:0> is_disabled 't1'
      false
    7. drop

    8. drop_all
    9. enable
    10. enable_all
    11. is_enabled
    12. exists
    13. list
    14. show_filters

      列出hbase的過濾器
    15. alter_status

      alter 命令的狀態,返回多少個region更新完了,在使用alter的時候也可以看到
    16. alter_async

      異步更改,不用等每個region都更新完
  3. Data Manipulation commands

    1. count

      
      #返回一個表的行數,表很大時,可能會花費很長時間,實際上執行了這個命令
      
      $HADOOP_HOME/bin/hadoop jar hbase.jar rowcount
      
      
      #默認每數1000行顯示一個count,這個顯示間隔也可以被指定
      
      hbase(main):006:0> count 'test',INTERVAL=>1
      Current count: 1, row: row1
      Current count: 2, row: row2
      2 row(s) in 0.0380 seconds
      => 2
      hbase(main):007:0> count 'test',INTERVAL=>2
      Current count: 2, row: row2
      2 row(s) in 0.0170 seconds
      => 2
      
      
      #可以使用表引用
      
    2. delete

      
      #確定一個cell的參數:表名、行、列、時間戳
      
      加不加{VISIBILITY=>'PRIVATE|SECRET'},沒看出區別來
      hbase(main):009:0> scan 'test'
      ROW                              COLUMN+CELL                                                                                  
       row1                            column=data:1, timestamp=1492479184416, value=v4                                             
       row1                            column=data:2, timestamp=1492479208074, value=v4                                             
       row2                            column=data:2, timestamp=1490237442551, value=v2                                             
      2 row(s) in 0.0260 seconds
      
      hbase(main):010:0> delete 'test', 'row1', 'data:1', 1492479184416
      0 row(s) in 0.1050 seconds
      
      hbase(main):011:0> scan 'test'
      ROW                              COLUMN+CELL                                                                                  
       row1                            column=data:2, timestamp=1492479208074, value=v4                                             
       row2                            column=data:2, timestamp=1490237442551, value=v2                                             
      2 row(s) in 0.0200 seconds
      
      hbase(main):012:0> delete 'test', 'row1', 'data:2', 1492479208074,{VISIBILITY=>'PRIVATE|SECRET'}
      0 row(s) in 0.0210 seconds
    3. deleteall

      
      #按行刪除所有cell
      
      
      #只指定行時,行所有cell都刪除
      
      
      #指定的越具體,刪除的越少
      
    4. get

    
    #獲得一行或者一個cell的內容
    
      hbase> t.get 'r1'
      hbase> t.get 'r1', {TIMERANGE => [ts1, ts2]}
      hbase> t.get 'r1', {COLUMN => 'c1'}
      hbase> t.get 'r1', {COLUMN => ['c1', 'c2', 'c3']}
      hbase> t.get 'r1', {COLUMN => 'c1', TIMESTAMP => ts1}
    
    #VERSIONS => 4 這是幹啥的?
    
      hbase> t.get 'r1', {COLUMN => 'c1', TIMERANGE => [ts1, ts2], VERSIONS => 4}
      hbase> t.get 'r1', {COLUMN => 'c1', TIMESTAMP => ts1, VERSIONS => 4}
    
    #過濾器,相當於模糊查詢?
    
      hbase> t.get 'r1', {FILTER => "ValueFilter(=, 'binary:abc')"}
      hbase> t.get 'r1', 'c1'
      hbase> t.get 'r1', 'c1', 'c2'
      hbase> t.get 'r1', ['c1', 'c2']
    
    #CONSISTENCY 一致性
    
      hbase> t.get 'r1', {CONSISTENCY => 'TIMELINE'}
      hbase> t.get 'r1', {CONSISTENCY => 'TIMELINE', REGION_REPLICA_ID => 1}
    
    #除了默認的toStringBinary外,還可以爲一個column指定格式化,只能爲一個列指定,不能爲一個列族指定
    
    
    #自定義格式化的兩種格式
    
      hbase> get 't1', 'r1', {COLUMN => ['cf:qualifier1:toInt','cf:qualifier2:c(org.apache.hadoop.hbase.util.Bytes).toInt'] }
    1. get_counter

      返回一個counter cell的值,這個cell應該是自增長的,並且裏面的值是二進制編碼的
      hbase> get_counter 'ns1:t1', 'r1', 'c1'
    2. incr

      incr的這個列必須是long型的
      hbase(main):053:0> put 't1','r1','f1:c2','20000000'
      0 row(s) in 0.0100 seconds
      
      hbase(main):054:0> incr 't1','r1','f1:c2'
      COUNTER VALUE = 3616443484303536177
      0 row(s) in 0.3320 seconds
      
      
      #用法
      
      hbase> incr 'ns1:t1', 'r1', 'c1'
      hbase> incr 't1', 'r1', 'c1'
      hbase> incr 't1', 'r1', 'c1', 1
      hbase> incr 't1', 'r1', 'c1', 10
      hbase> incr 't1', 'r1', 'c1', 10, {ATTRIBUTES=>{'mykey'=>'myvalue'}}
      hbase> incr 't1', 'r1', 'c1', {ATTRIBUTES=>{'mykey'=>'myvalue'}}
      hbase> incr 't1', 'r1', 'c1', 10, {VISIBILITY=>'PRIVATE|SECRET'}
    3. put

      hbase> put 'ns1:t1', 'r1', 'c1', 'value'
      hbase> put 't1', 'r1', 'c1', 'value'
      hbase> put 't1', 'r1', 'c1', 'value', ts1
      hbase> put 't1', 'r1', 'c1', 'value', {ATTRIBUTES=>{'mykey'=>'myvalue'}}
      hbase> put 't1', 'r1', 'c1', 'value', ts1, {ATTRIBUTES=>{'mykey'=>'myvalue'}}
      hbase> put 't1', 'r1', 'c1', 'value', ts1, {VISIBILITY=>'PRIVATE|SECRET'}
       ```
      
      8. scan

    hbase元數據

    hbase(main):057:0> scan 'hbase:meta'
    hbase(main):060:0> scan 'hbase:meta', {COLUMNS => 'info:regioninfo'}

    用法

    hbase> scan 'ns1:t1', {COLUMNS => ['c1', 'c2'], LIMIT => 10, STARTROW => 'xyz'}
    hbase> scan 't1', {COLUMNS => ['c1', 'c2'], LIMIT => 10, STARTROW => 'xyz'}
    hbase> scan 't1', {COLUMNS => 'c1', TIMERANGE => [1303668804, 1303668904]}
    hbase> scan 't1', {REVERSED => true}
    hbase> scan 't1', {ALL_METRICS => true}
    hbase> scan 't1', {METRICS => ['RPC_RETRIES', 'ROWS_FILTERED']}
    hbase> scan 't1', {ROWPREFIXFILTER => 'row2', FILTER => "
    (QualifierFilter (>=, 'binary:xyz')) AND (TimestampsFilter ( 123, 456))"}
    hbase> scan 't1', {FILTER =>
    org.apache.hadoop.hbase.filter.ColumnPaginationFilter.new(1, 0)}
    hbase> scan 't1', {CONSISTENCY => 'TIMELINE'}
    #屬性設置
    hbase> scan 't1', { COLUMNS => ['c1', 'c2'], ATTRIBUTES => {'mykey' => 'myvalue'}}
    hbase> scan 't1', { COLUMNS => ['c1', 'c2'], AUTHORIZATIONS => ['PRIVATE','SECRET']}
    #附加操作,caches blocks,爲掃描器設置的一塊緩存
    hbase> scan 't1', {COLUMNS => ['c1', 'c2'], CACHE_BLOCKS => false}
    #RAW 模式,返回所有的cell,包括被標記爲deleted的和未收回的deleted cell
    hbase> scan 't1', {RAW => true, VERSIONS => 10}

    #scan也支持自定義格式
    hbase> scan 't1', {COLUMNS => ['cf:qualifier1:toInt','cf:qualifier2:c(org.apache.hadoop.hbase.util.Bytes).toInt'] }

    9. truncate

    disable+drop+recreate
    “`

  4. HBase surgery tools

    1. assign

      
      #分配一個region(給誰?)
      
      hbase> assign 'REGIONNAME'
      hbase> assign 'ENCODED_REGIONNAME'
      
      hbase(main):005:0> assign 't1,10,1492773948319.0d9258cf91b0838b3d480500f60c4310.'
      0 row(s) in 0.7380 seconds
    2. balancer

      
      #觸發集羣平衡器
      
      
      #返回true:balancer運行並且成功的通知regionservers去解除綁定來平衡集羣(重新指派異步執行)
      
      hbase(main):002:0> balancer
      true    
      0 row(s) in 0.3150 seconds
      
      #當region處於過渡狀態,或者開關關掉的時候返回false
      
      hbase(main):017:0> balancer
      false                                                                                                                         
      0 row(s) in 0.0110 seconds
    3. balance_switch

      balancer 可用性開關
      返回開關的上一個狀態
      hbase> balance_switch true
      hbase> balance_switch false
    4. close_region

      關閉一個region
      hbase> close_region 'REGIONNAME'
      hbase> close_region 'REGIONNAME', 'SERVER_NAME'
      hbase> close_region 'ENCODED_REGIONNAME'
      hbase> close_region 'ENCODED_REGIONNAME', 'SERVER_NAME'
      
      hbase(main):033:0> close_region 'testAgain,,1492676988483.c31e2b2195d2cde9b0236c773397e49d.'
      0 row(s) in 0.0820 seconds
      
      
      #使用encode regionname不好用
      
    5. compact

      Compact all regions in a table:
      hbase> compact 'ns1:t1'
      
      #實際上沒有發生變化,t1的4個分區沒有合併成一個。。。
      
      hbase> compact 't1'
      Compact an entire region:
      hbase> compact 'r1'
      Compact only a column family within a region:
      hbase> compact 'r1', 'c1'
      Compact a column family within a table:
      hbase> compact 't1', 'c1'
    6. flush

      
        hbase> flush 'TABLENAME'
        hbase> flush 'REGIONNAME'
        hbase> flush 'ENCODED_REGIONNAME'
      
      
      hbase(main):041:0> flush 't1,30,1492773948319.1f0aa448a2f0054fb07492afc16f177e.'
      0 row(s) in 0.1010 seconds
      hbase(main):043:0> flush 't1'
      0 row(s) in 1.0100 seconds
    7. major_compact

          Compact all regions in a table:
          hbase> major_compact 't1'
          hbase> major_compact 'ns1:t1'
          Compact an entire region:
          hbase> major_compact 'r1'
          Compact a single column family within a region:
          hbase> major_compact 'r1', 'c1'
          Compact a single column family within a table:
          hbase> major_compact 't1', 'c1'
    8. move

      隨機移動一個region 或者指定一個目的地
      server_name的組成host+port+startcode
      hbase> move 'ENCODED_REGIONNAME'
      hbase> move 'ENCODED_REGIONNAME', 'SERVER_NAME'
    9. split

      split 'tableName'
      split 'namespace:tableName'
      split 'regionName' # format: 'tableName,startKey,id'
      split 'tableName', 'splitKey'
      split 'regionName', 'splitKey'
      
      前面的一個分區點操作時以10、20、30爲分割點分了區
      現在把30到末尾的區間以35爲分割點再切分
      hbase(main):009:0> split 't1,30,1492773948319.1f0aa448a2f0054fb07492afc16f177e.','35'
      0 row(s) in 0.0700 seconds
      
    10. unassign

      true參數強制清除in-memory狀態,然而不懂有啥用
      hbase> unassign 'REGIONNAME'
      hbase> unassign 'REGIONNAME', true
      hbase> unassign 'ENCODED_REGIONNAME'
      hbase> unassign 'ENCODED_REGIONNAME', true
    11. hlog_roll

      把日誌信息寫到一個file裏
      參數:servername
      hbase(main):015:0> hlog_roll 'datanode2,16020,1493112337861'
      0 row(s) in 0.0660 seconds
    12. zk_dump

      hbase(main):016:0> zk_dump
      HBase is rooted at /hbase
      Active master address: namenode,16000,1489714875654
      Backup master addresses:
       datanode2,16000,1489715230460
      Region server holding hbase:meta: datanode2,16020,1489715865553
      Region servers:
       namenode,16020,1489714878404
       datanode1,16020,1489714878004
       datanode2,16020,1489715865553
      /hbase/replication: 
      /hbase/replication/peers: 
      /hbase/replication/rs: 
      /hbase/replication/rs/datanode2,16020,1489715865553: 
      /hbase/replication/rs/datanode1,16020,1489714878004: 
      /hbase/replication/rs/namenode,16020,1489714878404: 
      Quorum Server Statistics:
       namenode:2181
        Zookeeper version: 3.4.9-1757313, built on 08/23/2016 06:50 GMT
        Clients:
         /172.16.11.235:43512[0](queued=0,recved=1,sent=0)
         /172.16.11.235:43510[1](queued=0,recved=16,sent=16)
      
        Latency min/avg/max: 0/1/100
        Received: 271
        Sent: 270
        Connections: 2
        Outstanding: 0
        Zxid: 0x2000001ec
        Mode: follower
        Node count: 48
       datanode1:2181
        Zookeeper version: 3.4.9-1757313, built on 08/23/2016 06:50 GMT
        Clients:
         /172.16.11.218:52992[1](queued=0,recved=110518,sent=110525)
         /172.16.11.218:52988[1](queued=0,recved=110289,sent=110289)
         /172.16.11.235:52524[1](queued=0,recved=110690,sent=110698)
         /172.16.11.218:52982[1](queued=0,recved=110291,sent=110291)
         /172.16.11.235:42630[0](queued=0,recved=1,sent=0)
         /172.16.11.235:52520[1](queued=0,recved=110278,sent=110278)
         /172.16.11.235:52516[1](queued=0,recved=162973,sent=162973)
      
        Latency min/avg/max: 0/0/31
        Received: 718898
        Sent: 718912
        Connections: 7
        Outstanding: 0
        Zxid: 0x2000001ec
        Mode: follower
        Node count: 48
       datanode2:2181
        Zookeeper version: 3.4.9-1757313, built on 08/23/2016 06:50 GMT
        Clients:
         /172.16.11.235:51826[0](queued=0,recved=1,sent=0)
         /172.16.11.216:37116[1](queued=0,recved=110286,sent=110286)
         /172.16.11.218:38290[1](queued=0,recved=110288,sent=110288)
         /172.16.11.235:33480[1](queued=0,recved=110275,sent=110275)
         /172.16.11.216:37114[1](queued=0,recved=110489,sent=110495)
         /172.16.11.235:33468[1](queued=0,recved=110274,sent=110274)
         /172.16.11.235:51482[1](queued=0,recved=131,sent=131)
         /172.16.11.235:33470[1](queued=0,recved=161093,sent=161488)
      
        Latency min/avg/max: 0/0/152
        Received: 715009
        Sent: 715409
        Connections: 8
        Outstanding: 0
        Zxid: 0x2000001ec
        Mode: leader
        Node count: 48
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章