Common Elasticsearch queries in Java

Basic usage of the Java Elasticsearch client

Adding the dependencies

   compile 'org.elasticsearch:elasticsearch:5.5.0'

   compile 'org.elasticsearch.client:transport:5.5.0'

Basic client usage

Obtaining a client

Settings settings = Settings.builder().put("cluster.name", "lw-6-test").build();
TransportClient client = new PreBuiltTransportClient(settings);
client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("10.10.10.6"), 9300));

Closing the client

client.close();
  • Search keywords must all be lowercase.

Get: fetch a document by index, type, and id

  public static void prepareGet(TransportClient client) throws Exception {
        GetResponse response = client.prepareGet("mytest", "test", "p1").get();
        System.out.println(response);
    }

 output:

{"_index":"mytest","_type":"test","_id":"p1","_version":1,"found":true,"_source":{"name":"mac Book 筆記本",
"price":1233,
"description":"這是筆記本",
"cats":["3c","computer"]

}
}

Index (insert): adding a document

  public static void insert(TransportClient client) throws Exception {
        Map<String,Object> resource = new HashMap<>();
        resource.put("name","mac Note");
        resource.put("price",8877);
        resource.put("description","mac Note 新款");
        IndexRequestBuilder index = client.prepareIndex("mytest", "test");

        IndexResponse insertResponse = index.setSource(resource).execute().get();

        System.out.println(insertResponse);
    }


output:
    IndexResponse[index=mytest,type=test,id=AV8CZmTgGnilLCUrybiV,version=1,result=created,shards={"total":2,"successful":1,"failed":0}]

Delete: deleting documents (delete-by-query)

  public static void delete(TransportClient client) throws Exception{
        BulkByScrollResponse response = DeleteByQueryAction.INSTANCE.newRequestBuilder(client)
                .filter(QueryBuilders.matchQuery("name", "mac")) // the query to match
                .source("mytest") // the index
                .get();


       long deleted = response.getDeleted();
       System.out.println("刪除個數: "+deleted);

    }

Update: modifying a document

 public static void update(TransportClient client) throws Exception{

        Map<String,Object> data = new HashMap<>();
        data.put("name","new mac node");

        UpdateRequest updateRequest =  new UpdateRequest();
        updateRequest.index("mytest");
        updateRequest.type("test");
        updateRequest.id("AV8CfcSLGnilLCUryoEl");

        updateRequest.doc(data);

        UpdateResponse response = client.update(updateRequest).get();
        System.out.println(response);

    }

output:
UpdateResponse[index=mytest,type=test,id=AV8CfcSLGnilLCUryoEl,version=2,result=updated,shards=ShardInfo{total=2, successful=1, failures=[]}]

MultiGet: fetching documents from multiple indices

    public static void multiIndex(TransportClient client) throws Exception {
        MultiGetResponse multiGetItemResponses = client.prepareMultiGet()
                .add("mytest","test","AV8CfcSLGnilLCUryoEl") //多個index
                .add("instestdb_business_log-2017.09","instestdb_business_log","AV7KHPtGDF9uyeK_lXln") //多個index
                .get();

        for(MultiGetItemResponse itemResponses : multiGetItemResponses) {
            GetResponse response = itemResponses.getResponse();
            if(response.isExists()) { 
                String json = response.getSourceAsString(); // get the _source field as a JSON string
                System.out.println(json);
            }

        }

    }

Bulk API: multiple index and delete operations in one request

 public static void BulkRequest(TransportClient client) throws Exception {
        BulkRequestBuilder bulkRequest = client.prepareBulk();

        IndexRequestBuilder index1 = client.prepareIndex("mytest", "test");
        IndexRequestBuilder index2 = client.prepareIndex("mytest", "test");

        Map<String,Object> resource = new HashMap<>();
        resource.put("name","華碩");
        resource.put("price",5577);
        resource.put("description","華碩z460");

        Map<String,Object> resource1 = new HashMap<>();
        resource1.put("name","小米2");
        resource1.put("price",4577);
        resource1.put("description","新機超薄");

        index1.setSource(resource);
        index2.setSource(resource1);

        bulkRequest.add(index1);
        bulkRequest.add(index2);


        BulkResponse bulkResponse = bulkRequest.get();


        if(bulkResponse.hasFailures()) {
            System.out.println(bulkResponse.buildFailureMessage());
        }

        bulkResponse.forEach(response ->{
            System.out.println(response.getId());
        });

    }
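
The heading also mentions deletes: a bulk request can mix index and delete operations. A minimal sketch, assuming the id of the document to delete is already known (the id and field values below are placeholders):

    public static void bulkWithDelete(TransportClient client) throws Exception {
        BulkRequestBuilder bulk = client.prepareBulk();

        Map<String, Object> doc = new HashMap<>();
        doc.put("name", "mac mini");
        doc.put("price", 3999);

        bulk.add(client.prepareIndex("mytest", "test").setSource(doc));           // index operation
        bulk.add(client.prepareDelete("mytest", "test", "AV8CZmTgGnilLCUrybiV"));  // delete operation (placeholder id)

        BulkResponse response = bulk.get();
        if (response.hasFailures()) {
            System.out.println(response.buildFailureMessage());
        }
    }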

Using the Query DSL

import static org.elasticsearch.index.query.QueryBuilders.*;

 Settings settings = Settings.builder()
                .put("cluster.name", "lw-6-test").build();
        TransportClient client = new PreBuiltTransportClient(settings);
        client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("10.10.10.6"), 9300));

        allquery(client); // run a specific Query DSL example

        client.close();

Match All Query: return all documents


    public static void allquery(TransportClient client) throws Exception{
        QueryBuilder qb = matchAllQuery();
        SearchResponse response = client.prepareSearch("mytest").setTypes("test").setSize(3).setQuery(qb).get();

        System.out.println("length: "+response.getHits().getHits().length );
        if(response.getHits().getTotalHits() != 0) {
            for (SearchHit hit : response.getHits().getHits()) {

                System.out.println(hit.getSourceAsString());
            }
        }
    }

Match Query: query on a single field

  public static void myMatchQuery(TransportClient client) throws Exception {
        QueryBuilder qb = matchQuery("name","mac");

        SearchResponse response = client.prepareSearch("mytest").setTypes("test").setQuery(qb).get();
        System.out.println("length: "+response.getHits().getHits().length );
        if(response.getHits().getTotalHits() != 0) {
            for (SearchHit hit : response.getHits().getHits()) {
                System.out.println(hit.getScore()+" --> "+hit.getSourceAsString());
            }

        }
    }

Multi Match Query: search one keyword across multiple fields

  QueryBuilder qb = multiMatchQuery("mac","description","name"); // "mac" is the search term; description and name are the fields

Common Terms Query: term search that separates high- and low-frequency terms

 public static void myCommonTermsQuery(TransportClient client ) throws Exception{
        QueryBuilder qb = commonTermsQuery("description","mac");
        print(qb,client);
    }

Query String Query: query-string syntax supporting wildcards and boosts

public static void mySimpleQueryString(TransportClient client ) {
        QueryBuilder qb = queryStringQuery("mac*^2").field("name");
        print(qb,client);
    }

Term Query: exact match on a single term

 public static void myTermQuery(TransportClient client) {
        QueryBuilder qb = termQuery("name","mac2");
        print(qb,client);
    }

Terms Query: exact match on any of several terms

   QueryBuilder qb = termsQuery("name_str","小米","戴爾");
   print2(qb,client);

Range Query: range search

  public static void myRangeQuery(TransportClient client){
        QueryBuilder qb = rangeQuery("price").from(3399)
                .to(6399)
                .includeLower(true)
                .includeUpper(false);

        print2(qb,client);

    }

    QueryBuilder qb = rangeQuery("price").gte(3399).lt(6399);

Exists Query: matches documents in which the field exists; if the field is missing, nothing (0 hits) is returned

    public static void myExistsQuery(TransportClient client) {
        QueryBuilder qb = existsQuery("name_str");
        print2(qb,client);
    }

Wildcard Query: wildcard search

QueryBuilder qb = wildcardQuery("user", "k?mc*");

Regexp Query: regular-expression search

QueryBuilder qb = regexpQuery("name.first", "s.*y");

Fuzzy Query: fuzzy search

QueryBuilder qb = fuzzyQuery("name", "kimzhy");

Ids Query: look up documents by id

QueryBuilder qb = idsQuery("my_type", "type2")
    .addIds("1", "4", "100");

QueryBuilder qb = idsQuery() 
    .addIds("AV8HhVC8FiG-4m4G2rYp","AV8HhVB6FiG-4m4G2rYm");

Compound queries

Constant Score Query: wrap a query with a fixed score

  QueryBuilder qb = constantScoreQuery(matchQuery("name_str", "聯想")).boost(3.0f);

Bool Query: must, mustNot, should

All must clauses have to match, and all must_not clauses must not match. How many should clauses have to match? By default, none of the should clauses is required to match, with one exception: if there are no must clauses, at least one should clause must match.

  public static void myBoolQuery(TransportClient client) {
        QueryBuilder qb = boolQuery().must(termQuery("name_str","小米"))
                .filter(matchQuery("price",3599))
                .filter(matchQuery("description","lihao"));
        print2(qb, client);
    }

    QueryBuilder qb = boolQuery()
    .must(termQuery("content", "test1"))    // must match
    .must(termQuery("content", "test4"))    // must match
    .mustNot(termQuery("content", "test2")) // must not match
    .should(termQuery("content", "test3"))  // optional, only affects scoring here
    .filter(termQuery("content", "test5")); // must match, excluded from scoring

Indices Query: querying multiple indices

Queries multiple indices: for the listed indices, the inner query is executed; for all other indices, the query configured as no_match_query is executed instead.
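
The original notes include no code for the query itself. A minimal sketch, with placeholder index names and fields (note that the indices query is deprecated in ES 5.x):

QueryBuilder qb = indicesQuery(
        termQuery("tag", "wow"),                // query applied to the listed indices
        "index1", "index2")                     // the listed indices (placeholders)
        .noMatchQuery(termQuery("tag", "kow")); // query applied to every other index

The print and print2 methods below are the shared helpers used by the query examples above: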
private static void print(QueryBuilder qb, TransportClient client) {
        SearchResponse response = client.prepareSearch("mytest").setTypes("test").setQuery(qb).get();
        System.out.println("length: " + response.getHits().getHits().length);
        if (response.getHits().getTotalHits() != 0) {
            for (SearchHit hit : response.getHits().getHits()) {
                System.out.println(hit.getScore() + " --> " + hit.getSourceAsString());
            }

        }
    }

    private static void print2(QueryBuilder qb, TransportClient client) {
        SearchResponse response = client.prepareSearch("mytest_1").setTypes("test").setQuery(qb).get();
        System.out.println("length: " + response.getHits().getHits().length);
        if (response.getHits().getTotalHits() != 0) {
            for (SearchHit hit : response.getHits().getHits()) {
                System.out.println(hit.getScore() + " --> " + hit.getSourceAsString());
            }

        }
    }

Scroll: paging through results

 public static void main(String ...arg) throws Exception {

        // connect to the cluster
        Settings  settings = Settings.builder()
                .put("cluster.name","lw-6-test").build();

        TransportClient client = new PreBuiltTransportClient(settings);
        client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("10.10.10.6"), 9300));


        // build the search query
        QueryBuilder qb = termQuery("name_str","筆記本");


        // sort by price descending, 2 documents per batch; the first request does not pass a scroll id
        SearchResponse scrollResp = client.prepareSearch("mytest_1").setTypes("test")
                .addSort("price", SortOrder.DESC)
                .setScroll(new TimeValue(30000))
                .setQuery(qb)
                .setSize(2).get();

        System.out.println("length: " + scrollResp.getHits().getHits().length);
        int count = 1;
        do{
            System.out.println("第 " +count+ " 次");
            System.out.println();
            for (SearchHit hit : scrollResp.getHits().getHits()){
                System.out.println(hit.getScore() + " --> " +hit.getSourceAsString());
            }


            System.out.println("scrollid:  "+scrollResp.getScrollId());

            // continue the scroll using the returned scroll id
            scrollResp =client.prepareSearchScroll(scrollResp.getScrollId()).setScroll(new TimeValue(60000)).execute().actionGet();
            System.out.println();

            count++;

        } while (scrollResp.getHits().getHits().length !=0);


        client.close();

    }
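
The example above never releases the scroll context; it simply expires after the timeout. A minimal sketch for clearing it explicitly once paging is done (this would go before client.close()):

        // release the scroll context explicitly instead of waiting for it to time out
        ClearScrollResponse clearResp = client.prepareClearScroll()
                .addScrollId(scrollResp.getScrollId())
                .get();
        System.out.println("scroll cleared: " + clearResp.isSucceeded());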

prepareMultiSearch: multiple queries in one request

public class MultiSearchDemo {

    public static void main(String ...arg) throws Exception{
        Settings settings = Settings.builder().put("cluster.name", "lw-6-test").build();

        TransportClient client = new PreBuiltTransportClient(settings);
        client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("10.10.10.6"), 9300));


       QueryBuilder query1 = termQuery("name_str","小米");
       QueryBuilder query2 = termQuery("name_str","戴爾");

        SearchRequestBuilder srb1 = client.prepareSearch("mytest_1").setTypes("test").setQuery(query1);
        SearchRequestBuilder srb2 = client.prepareSearch("mytest_1").setTypes("test").setQuery(query2);

        MultiSearchResponse sr = client.prepareMultiSearch().add(srb1).add(srb2).get();

        long nbHits =0;
        for(MultiSearchResponse.Item item : sr.getResponses()) {
            SearchResponse response = item.getResponse();
            nbHits += response.getHits().getTotalHits();

            if(response.getHits().getHits().length >0) {

                for(SearchHit hit : response.getHits().getHits()) {
                    System.out.println(hit.getScore()+"  ----->  "+hit.getSourceAsString());
                }

            }
            System.out.println("-------------------------");
        }

        System.out.println(nbHits);

        client.close();

    }
}

Aggregations

Overview

The ES aggregation framework groups and summarizes the documents matched by a query, providing complex statistical analysis.

Within a single aggregation request, ES can return the result of an aggregation and aggregate over it again; in other words, aggregations can be nested.

This is very useful: one request can return the results of several aggregations, avoiding multiple round trips and reducing the load on the network and the servers.

Aggregation types

1: Bucketing aggregations: define "buckets" and assign documents to them, then aggregate within each bucket; very similar in meaning to SQL's GROUP BY.

2: Metric aggregations: mainly target numeric fields; they track and compute metrics over a set of documents, which requires more computation from ES.

3: Pipeline aggregations: aggregate the output and metrics of other aggregations.

Basic aggregation syntax

"aggregations" : {                      //定義聚合對象,也可用 "aggs"
    "<aggregation_name>" : {            //聚合的名稱,用戶自定義   
        "<aggregation_type>" : {         //聚合類型,比如 "histogram" "avg"
            <aggregation_body>
        }
        [,"meta" : {  [<meta_data_body>] } ]?
        [,"aggregations" : { [<sub_aggregation>]+ } ]?
    }
    [,"<aggregation_name_2>" : { ... } ]*  ////定義額外的多個平級聚合,只有Bucketing類型纔有意義
}

GET mytest_1/test/_search
{
  "aggs" : {
        "avg_price" : { "avg" : { "field" : "price" } }
    }
}

output:
{
  "aggregations": {
    "avg_price": {
      "value": 4954.555555555556
    }
  }
}


Using metric aggregations

Computing an average (avg)

   public static void main(String ...arg) throws Exception {

        Settings settings = Settings.builder().put("cluster.name", "lw-6-test").build();

        TransportClient client = new PreBuiltTransportClient(settings);
        client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("10.10.10.6"),9300));


        avg(client);



        client.close();


    }

     private static void avg(TransportClient client) {
        SearchRequestBuilder search = client.prepareSearch("mytest_1").setTypes("test");

        SearchResponse sr = search.addAggregation(AggregationBuilders.avg("avg_price").field("price")).execute().actionGet();

        Avg result = sr.getAggregations().get("avg_price");
        System.out.println(result.getValue());
    }


POST mytest_1/test/_search?size=0
{
  "aggs" : {
    "avg_price" : {
      "avg" : { "field" : "price" }
    }
  }
}
The same request grouped by tag, computing an average per category (the Java version appears in the "Computing the average per category" section below):

POST mytest_1/test/_search?size=0
{
  "aggs" : {
    "all_cats" : {
      "terms" : { "field" : "tag.keyword" },
      "aggs" : {
        "avg_price" : {
          "avg" : { "field" : "price" }
        }
      }
    }
  }
}


  "aggregations": {
    "all_cats": {
      "doc_count_error_upper_bound": 0,
      "sum_other_doc_count": 0,
      "buckets": [
        {
          "key": "筆記本",
          "doc_count": 8,
          "avg_price": {
            "value": 5124
          }
        },
        {
          "key": "聯想",
          "doc_count": 4,
          "avg_price": {
            "value": 5649
          }
        },
        {
          "key": "小米",
          "doc_count": 2,
          "avg_price": {
            "value": 4399
          }
        },
        {
          "key": "惠普",
          "doc_count": 1,
          "avg_price": {
            "value": 2399
          }
        },
        {
          "key": "戴爾",
          "doc_count": 1,
          "avg_price": {
            "value": 7199
          }
        }
      ]
    }
  }

Computing the average per category

PUT mytest_1/_mapping/test
{
  "properties": {
    "tag": { 
      "type":     "text",
      "fielddata": true
    }
  }
}

    private static void avg1(TransportClient client) {
        SearchRequestBuilder search = client.prepareSearch("mytest_1").setSize(0).setTypes("test");

        TermsAggregationBuilder tag = AggregationBuilders.terms("tags").field("tag.keyword");
        AvgAggregationBuilder price = AggregationBuilders.avg("avg_price").field("price");
        tag.subAggregation(price);

        SearchResponse sr = search.addAggregation(tag).execute().actionGet();

        System.out.println(sr);
    }

Cardinality: counting distinct values

Summarizes a single field by computing the (approximate) number of distinct values.

    public static void main(String... arg) throws Exception {

        Settings settings = Settings.builder().put("cluster.name", "lw-6-test").build();

        TransportClient client = new PreBuiltTransportClient(settings);
        client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("10.10.10.6"), 9300));


        cardinality(client);


        client.close();


    }

  private static void cardinality(TransportClient client) {

        SearchRequestBuilder search = client.prepareSearch("mytest_1").setTypes("test");
        SearchResponse sr = search.addAggregation(AggregationBuilders.cardinality("type_count").field("price")).execute().actionGet();

        Cardinality result = sr.getAggregations().get("type_count");

        System.out.println("type_count: "+result.getValue());

    }


POST mytest_1/test/_search?size=0
{
    "aggs" : {
        "type_count" : {
            "cardinality" : {
                "field" : "price"
            }
        }
    }
}

out:
{
  "took": 7,
  "timed_out": false,
  "_shards": {
    "total": 5,
    "successful": 5,
    "failed": 0
  },
  "hits": {
    "total": 8,
    "max_score": 0,
    "hits": []
  },
  "aggregations": {
    "type_count": {
      "value": 6
    }
  }
}


Stats: count, min, max, avg, and sum in one aggregation

    private static void stats(TransportClient client) {
        SearchRequestBuilder search = client.prepareSearch("mytest_1").setTypes("test");
        SearchResponse sr =  search.addAggregation(AggregationBuilders.stats("price_stats").field("price")).execute().actionGet();

       Stats stats = sr.getAggregations().get("price_stats");
        System.out.println(stats.getAvgAsString());
        System.out.println(stats.getMaxAsString());
        System.out.println(stats.getMinAsString());
        System.out.println(stats.getSumAsString());

    }


The DSL version below uses extended_stats, which adds sum_of_squares, variance, and standard deviation on top of the basic stats:

POST mytest_1/test/_search?size=0
{
    "aggs" : {
        "price_stats" : { "extended_stats" : { "field" : "price" } }
    }
}

out:
 "aggregations": {
    "price_stats": {
      "count": 8,
      "min": 2399,
      "max": 7199,
      "avg": 5124,
      "sum": 40992,
      "sum_of_squares": 231958008,
      "variance": 2739375,
      "std_deviation": 1655.1057368035433,
      "std_deviation_bounds": {
        "upper": 8434.211473607087,
        "lower": 1813.7885263929134
      }
    }
  }

Percentiles: the value at a given percentile

This is a multi-value metric aggregation that computes, over the aggregated documents, the observed values corresponding to one or more percentiles.

1: The default percentiles are [1, 5, 25, 50, 75, 95, 99].

2: The values are usually approximate; several different algorithms exist to compute them.

For example, the 95th percentile is the value that is greater than 95% of the observed values.

POST mytest_1/test/_search?size=0
{
    "aggs" : {
        "price_percent" : {
            "percentiles" : {
                "field" : "price" 
            }
        }
    }
}

POST mytest_1/test/_search?size=0
{
    "aggs" : {
        "price_percent" : {
            "percentiles" : {
                "field" : "price" ,
                "percents" : [0.1,50,95, 99, 100]  //自定義百分比區間
            }
        }
    }
}


  "aggregations": {
    "price_percent": {
      "values": {
        "1.0": 2468.9999999999995,
        "5.0": 2749,
        "25.0": 3549,
        "50.0": 5799,
        "75.0": 6399,
        "95.0": 6918.999999999999,
        "99.0": 7143
      }
    }
  }

 private static void percentile(TransportClient client) {
        SearchRequestBuilder search = client.prepareSearch("mytest_1").setTypes("test");
        SearchResponse sr =  search.addAggregation(AggregationBuilders.percentiles("price_percent").field("price")).execute().actionGet();

       Percentiles percentile = sr.getAggregations().get("price_percent");

       System.out.println(percentile.percentileAsString(80));
    }

    // custom percentile values
    private static void percentile2(TransportClient client) {
        SearchRequestBuilder search = client.prepareSearch("mytest_1").setTypes("test");
        SearchResponse sr =  search.addAggregation(AggregationBuilders.percentiles("price_percent").percentiles(0.1,50,95, 99, 100).field("price")).execute().actionGet();

        System.out.println(sr);
        Percentiles percentile = sr.getAggregations().get("price_percent");

        System.out.println(percentile.percentileAsString(80));
    }

Value Count: number of values aggregated

POST mytest_1/test/_search?size=0
{
"aggs" : {
        "types_count" : { "value_count" : { "field" : "price" } }
    }

}  

out:
"aggregations": {
    "types_count": {
      "value": 8
    }
  }

 private static void valueCount(TransportClient client) {
        SearchRequestBuilder search = client.prepareSearch("mytest_1").setTypes("test");
        SearchResponse sr =search.addAggregation(AggregationBuilders.count("value_count").field("price")).execute().actionGet();
       ValueCount valueCount = sr.getAggregations().get("value_count");
       System.out.println(valueCount.getValue());

    }

Top Hits

Returns the top n documents matching the query. Supported options include from, size, and sort.

    private static void topHits(TransportClient client) {
        SearchRequestBuilder search = client.prepareSearch("mytest_1").setTypes("test");

        TopHitsAggregationBuilder addtion = AggregationBuilders.topHits("top_price_hits").sort("price", SortOrder.DESC).fieldDataField("price")
                .size(5);

        SearchResponse sr =search.addAggregation(addtion).execute().actionGet();
       TopHits topHits = sr.getAggregations().get("top_price_hits");
       System.out.println();
        SearchHit[] hits = topHits.getHits().internalHits();
        for(SearchHit searchHit : hits) {
            System.out.println(searchHit.getSourceAsString());

        }

    }


Using bucket aggregations

Histogram

Histogram (bar-chart) aggregation: groups documents by the value of a numeric field. The bucket a document falls into is computed roughly as follows:

rem = value % interval
if (rem < 0) {
    rem += interval
}
bucket_key = value - rem

Configurable parameters:

(1) field: the field to bucket on; must be numeric

(2) interval: the bucket width

(3) min_doc_count: minimum document count; only buckets with at least this many documents are returned

(4) extended_bounds: extend the bucket range beyond the data

(5) order: bucket ordering; if the histogram has a metric "direct" sub-aggregation, the ordering can use that sub-aggregation's result

(6) offset: offset of the bucket boundaries, 0 by default

(7) keyed: return the buckets as a hash keyed by bucket key instead of as an array

(8) missing: default value for documents that are missing the field

POST mytest_1/test/_search?size=0
{
    "aggs" : {
        "prices" : {
            "histogram" : {
                "field" : "price",
                "interval" : 2000
            }
        }
    }
}

out:
 "aggregations": {
    "prices": {
      "buckets": [
        {
          "key": 2000,
          "doc_count": 3
        },
        {
          "key": 4000,
          "doc_count": 1
        },
        {
          "key": 6000,
          "doc_count": 4
        }
      ]
    }
  }

  private static void histogram(TransportClient client) {
        SearchRequestBuilder search = client.prepareSearch("mytest_1").setTypes("test").setSize(0);
        HistogramAggregationBuilder addtion = AggregationBuilders.histogram("prices").interval(2000).field("price");

        SearchResponse sr = search.addAggregation(addtion).execute().actionGet();

       Histogram histogram = sr.getAggregations().get("prices");
       histogram.getBuckets().forEach(bucket->{
           System.out.println(bucket.getKeyAsString()+" ---->  "+bucket.getDocCount());
       });
    }

Range

Range aggregation: groups documents by the value of a field according to the configured ranges.

POST mytest_1/test/_search?size=0
{
    "aggs" : {
        "price_ranges" : {
            "range" : {
                "field" : "price",
                "ranges" : [
                    { "to" : 3000 },
                    { "from" : 3000, "to" : 5000 },
                    { "from" : 5000 }
                ]
            }
        }
    }
}

out:
"aggregations": {
    "price_ranges": {
      "buckets": [
        {
          "key": "*-3000.0",
          "to": 3000,
          "doc_count": 1
        },
        {
          "key": "3000.0-5000.0",
          "from": 3000,
          "to": 5000,
          "doc_count": 2
        },
        {
          "key": "5000.0-*",
          "from": 5000,
          "doc_count": 5
        }
      ]
    }
  }

   private static void range(TransportClient client) {
        SearchRequestBuilder search = client.prepareSearch("mytest_1").setTypes("test").setSize(0);
        AggregationBuilder addtion = AggregationBuilders.range("price_ranges").field("price")
                .addUnboundedTo(3000)
                .addRange(3000,5000)
                .addUnboundedFrom(5000);

        SearchResponse sr = search.addAggregation(addtion).execute().actionGet();

       Range histogram = sr.getAggregations().get("price_ranges");
       histogram.getBuckets().forEach(bucket->{
           String key = bucket.getKeyAsString();
           String from = bucket.getFromAsString();
           String to = bucket.getToAsString();
           long count = bucket.getDocCount();
           System.out.println("key : "+key+"\t form: "+from+"\t to:"+to+"\t count:"+count);
       });
    }

Date Range
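
The original notes leave this section empty. A minimal sketch of a date_range aggregation, assuming a hypothetical date field named create_date:

AggregationBuilder dateAgg = AggregationBuilders
        .dateRange("date_ranges")
        .field("create_date")                  // hypothetical date field
        .format("yyyy-MM-dd")
        .addUnboundedTo("2017-01-01")          // before 2017-01-01
        .addRange("2017-01-01", "2017-10-01")  // [2017-01-01, 2017-10-01)
        .addUnboundedFrom("2017-10-01");       // from 2017-10-01 on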

Ip Range
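
Also empty in the original. A minimal ip_range sketch, assuming a hypothetical ip field named client_ip:

AggregationBuilder ipAgg = AggregationBuilders
        .ipRange("ip_ranges")
        .field("client_ip")                    // hypothetical ip field
        .addUnboundedTo("10.0.0.127")
        .addRange("10.0.0.127", "10.0.0.200")
        .addUnboundedFrom("10.0.0.200");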

Terms

Terms aggregation: groups documents by every distinct term in the specified field and counts the documents in each group (a Java sketch follows the DSL below).

POST mytest_1/test/_search?size=0
{
   "aggs" : {
        "all_cats" : {
          "terms" : { "field" : "tag.keyword" }

        }

   }
}
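
A minimal Java sketch of the same terms aggregation, reusing the AggregationBuilders.terms builder already shown in the per-category average example:

        TermsAggregationBuilder allCats = AggregationBuilders.terms("all_cats").field("tag.keyword");
        SearchResponse sr = client.prepareSearch("mytest_1").setTypes("test").setSize(0)
                .addAggregation(allCats).get();
        Terms terms = sr.getAggregations().get("all_cats");
        terms.getBuckets().forEach(bucket ->
                System.out.println(bucket.getKeyAsString() + " ----> " + bucket.getDocCount()));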

Filters

Multi-filter aggregation: applies several filter conditions; each bucket contains all the documents that match its filter, so a document can appear in more than one bucket.

POST mytest_1/test/_search?size=0
{
  "size": 0,
  "aggs" : {
    "messages" : {
      "filters" : {
        "filters" : {
          "filter1" :   { "match" : { "tag" : "小米"   }},
          "filter2" : { "match" : { "tag" : "戴爾" }}
        }
      }
    }
  }
}

out:
"aggregations": {
    "messages": {
      "buckets": {
        "filter1": {
          "doc_count": 2
        },
        "filter2": {
          "doc_count": 1
        }
      }
    }
  }

private static  void filters(TransportClient client) {
        SearchRequestBuilder search = client.prepareSearch("mytest_1").setTypes("test").setSize(0);

        AggregationBuilder aggregation = AggregationBuilders.filters("filters",
                new FiltersAggregator.KeyedFilter("xiaomi",matchQuery("tag","小米")),
                new FiltersAggregator.KeyedFilter("daier",matchQuery("tag","戴爾"))
        );
        SearchResponse sr = search.addAggregation(aggregation).execute().actionGet();
        System.out.println(sr);
        Filters agg =sr.getAggregations().get("filters");
        agg.getBuckets().forEach(entry->{
            String key = entry.getKeyAsString();            // bucket key
            long docCount = entry.getDocCount();
            System.out.println("key : "+key+"\t docCount: "+docCount);
        });


    }

Technical notes

Analysis (word segmentation)

Installing the analyzer plugin

https://github.com/medcl/elasticsearch-analysis-ik/tree/v5.5.0

Testing the analyzer

GET _analyze
{
  "analyzer": "ik_smart",
  "text": ["中華人民共和國"]

}

Parameters

boost: weight, default 1.0 (a small example follows).
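
A tiny sketch of setting boost on a query (field and value are illustrative):

QueryBuilder qb = matchQuery("name", "mac").boost(2.0f); // default boost is 1.0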

bool

bool filter: combines the results of multiple filter conditions using boolean logic. It supports the following operators (a small sketch follows the list):

must: all of the query conditions must match; equivalent to and.

must_not: the query conditions must not match; equivalent to not.

should: at least one of the query conditions matches; equivalent to or.
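
A minimal sketch combining the three operators (field names and values are illustrative):

QueryBuilder qb = boolQuery()
        .must(termQuery("tag", "computer"))   // and
        .mustNot(termQuery("tag", "phone"))   // not
        .should(termQuery("tag", "laptop"));  // or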

References

https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/_structuring_aggregations.html
