目錄
一:跳轉過濾器(SkipFilter)
跳轉過濾器包裝了一個用戶提供的過濾器,當被包裝的過濾器遇到一個需要過濾的KeyValue實例時,過濾範圍會擴展到整行數據。換言之,當過濾器發現某一行中的一列被過濾時,那麼整行都會被過濾掉。案例將SkipFilter和ValueFilter組合起來,過濾掉包含不符合條件列值(此處為"ccc")的行。
package appendFilter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import java.io.IOException;
public class skipFilter {
    // HBase client configuration and connection, created inside the test method.
    private Configuration configuration = null;
    private Connection connection = null;

    /**
     * Demonstrates {@code SkipFilter}. First scans table {@code ns1:t1} with a plain
     * {@code ValueFilter} that keeps only cells whose value is not "ccc"; then wraps the
     * same filter in a {@code SkipFilter}, which drops an entire row as soon as any one
     * of its cells is filtered out.
     *
     * @throws IOException if connecting to HBase or scanning the table fails
     */
    @Test
    public void skipFilter() throws IOException {
        System.out.println("begin");
        configuration = HBaseConfiguration.create();
        // try-with-resources guarantees the connection and table are closed even when
        // a scan throws. The original code closed the table only on the happy path and
        // never closed the Connection at all (a leak).
        try (Connection conn = ConnectionFactory.createConnection(configuration);
             Table table = conn.getTable(TableName.valueOf("ns1:t1"))) {
            connection = conn; // keep the field populated, as the original did
            Filter filter = new ValueFilter(CompareFilter.CompareOp.NOT_EQUAL,
                    new BinaryComparator(Bytes.toBytes("ccc")));
            // First scan: bare ValueFilter — filters individual cells only.
            scanAndPrint(table, filter);
            System.out.println("-------------------------------------------------");
            // Second scan: SkipFilter wrapper — one filtered cell drops the whole row.
            scanAndPrint(table, new SkipFilter(filter));
        }
        System.out.println("end");
    }

    /**
     * Runs a scan over {@code table} using {@code filter} and prints every KeyValue.
     * The ResultScanner is closed via try-with-resources, exception-safe.
     *
     * @param table  the table to scan (not closed by this method)
     * @param filter the filter to apply to the scan
     * @throws IOException if the scan fails
     */
    private static void scanAndPrint(Table table, Filter filter) throws IOException {
        Scan scan = new Scan();
        scan.setFilter(filter);
        try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result result : scanner) {
                for (KeyValue kv : result.raw()) {
                    System.out.println("KV: " + kv + ",value: " + Bytes.toString(kv.getValue()));
                }
            }
        }
    }
}
結果顯示:
二:全匹配過濾器(WhileMatchFilter)
全匹配過濾器和跳轉過濾器相似,不過當某條數據被過濾掉時,它會直接終止整個掃描操作,之後的行都不再返回。
package appendFilter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import java.io.IOException;
public class whileMatchFilter {
    // HBase client configuration and connection, created inside the test method.
    private Configuration configuration = null;
    private Connection connection = null;

    /**
     * Demonstrates {@code WhileMatchFilter}. First scans table {@code ns1:t1} with a
     * plain {@code RowFilter} that skips the row whose key equals "row3"; then wraps
     * the same filter in a {@code WhileMatchFilter}, which aborts the whole scan the
     * first time the inner filter rejects a row — no later rows are returned.
     *
     * @throws IOException if connecting to HBase or scanning the table fails
     */
    @Test
    public void whileMatchFilter() throws IOException {
        System.out.println("begin");
        configuration = HBaseConfiguration.create();
        // try-with-resources guarantees the connection and table are closed even when
        // a scan throws. The original code closed the table only on the happy path and
        // never closed the Connection at all (a leak).
        try (Connection conn = ConnectionFactory.createConnection(configuration);
             Table table = conn.getTable(TableName.valueOf("ns1:t1"))) {
            connection = conn; // keep the field populated, as the original did
            Filter filter = new RowFilter(CompareFilter.CompareOp.NOT_EQUAL,
                    new BinaryComparator(Bytes.toBytes("row3")));
            // First scan: bare RowFilter — only the non-matching row is skipped.
            scanAndPrint(table, filter);
            System.out.println("-------------------------------------------------");
            // Second scan: WhileMatchFilter wrapper — scan stops at the first rejection.
            scanAndPrint(table, new WhileMatchFilter(filter));
        }
        System.out.println("end");
    }

    /**
     * Runs a scan over {@code table} using {@code filter} and prints every KeyValue.
     * The ResultScanner is closed via try-with-resources, exception-safe.
     *
     * @param table  the table to scan (not closed by this method)
     * @param filter the filter to apply to the scan
     * @throws IOException if the scan fails
     */
    private static void scanAndPrint(Table table, Filter filter) throws IOException {
        Scan scan = new Scan();
        scan.setFilter(filter);
        try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result result : scanner) {
                for (KeyValue kv : result.raw()) {
                    System.out.println("KV: " + kv + ",value: " + Bytes.toString(kv.getValue()));
                }
            }
        }
    }
}
結果展示: