Kafka Custom Partitioner API

Import the pom dependencies

<dependencies>
        <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>1.0.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-streams</artifactId>
            <version>1.0.0</version>
        </dependency>

    </dependencies>

    <build>
        <plugins>
            <!-- Java compiler plugin -->
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.2</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <encoding>UTF-8</encoding>
                </configuration>
            </plugin>
        </plugins>
    </build>

Producer code

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

/*
Produces data into the Kafka cluster
 */
public class Producer {


    // Program entry point
    public static void main(String[] args){

        // 1. Configure the Kafka cluster
        Properties props = new Properties();
        // Kafka broker addresses
        props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        // Acknowledgement mechanism: wait for all in-sync replicas
        props.put("acks", "all");
        // Number of retries on transient send failures
        props.put("retries", 1);
        // Batch size in bytes
        props.put("batch.size", 16384);
        // How long to wait (ms) before sending a batch
        props.put("linger.ms", 1);
        // Total memory (bytes) available for buffering records
        props.put("buffer.memory", 33554432);
        // Serializers for record keys and values
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Register the custom partitioner class
        props.put("partitioner.class", "task.task05.ProducerPartition");

        // 2. Create the producer instance
        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<String, String>(props);

        // 3. Send data to the Kafka cluster; the producer instantiates
        // ProducerPartition itself from the partitioner.class setting,
        // so no manual instance is needed here
        for (int i = 0; i < 300; i++) {
            // The record value (0-299) drives the custom partitioning
            ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>("title", "0", "" + i);
            kafkaProducer.send(producerRecord);
        }
        kafkaProducer.close();
    } 
}
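
The partitioner below returns partitions 0, 1, and 2, so the topic "title" must already exist with at least three partitions before running the producer; otherwise sends routed to partitions 1 and 2 will fail. A minimal sketch that creates the topic with the AdminClient shipped in kafka-clients (the class name CreateTopicHelper and the replication factor of 1 are assumptions; the broker addresses match the producer config above):

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;

import java.util.Collections;
import java.util.Properties;

/*
Hypothetical helper: creates the "title" topic with 3 partitions
 */
public class CreateTopicHelper {

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        AdminClient adminClient = AdminClient.create(props);
        // 3 partitions so the custom partitioner can target 0, 1 and 2;
        // a replication factor of 1 is an assumption for a test cluster
        NewTopic topic = new NewTopic("title", 3, (short) 1);
        adminClient.createTopics(Collections.singletonList(topic)).all().get();
        adminClient.close();
    }
}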

Custom partitioner code

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

import java.util.Map;

/*
Custom partitioner implementation
 */
public class ProducerPartition implements Partitioner {
    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        // Route values 0-99 to partition 0, 100-199 to partition 1, and 200-299 to partition 2
        Integer data = Integer.valueOf(value.toString());
        if (data < 100) return 0;
        if (data < 200) return 1;
        if (data < 300) return 2;
        // A negative return value is not a valid partition,
        // so fall back to partition 0 for out-of-range values
        return 0;
    }

    @Override
    public void close() {
        // No resources to release
    }

    @Override
    public void configure(Map<String, ?> configs) {
        // No extra configuration needed
    }
}
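
Since partition() only inspects the record value, the mapping can be sanity-checked without a broker. A quick hypothetical test class (the Cluster argument is passed as null because this partitioner never touches it):

public class ProducerPartitionTest {

    public static void main(String[] args) {
        ProducerPartition partitioner = new ProducerPartition();
        // Expected output: 0, 1, 2
        System.out.println(partitioner.partition("title", "0", null, "50", null, null));
        System.out.println(partitioner.partition("title", "0", null, "150", null, null));
        System.out.println(partitioner.partition("title", "0", null, "250", null, null));
    }
}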

Consumer code

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Arrays;
import java.util.Properties;

/*
Consumes/reads data from the Kafka cluster
 */
public class Consumer {

    public static void main(String[] args){
        // 1. Configure the consumer
        Properties props = new Properties();
        // Kafka broker addresses
        props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        // Consumer group
        props.put("group.id", "test");
        // Enable automatic offset commits
        props.put("enable.auto.commit", "true");
        // Resume from the committed offset for each partition when one exists;
        // otherwise start consuming from the beginning of the log
        props.put("auto.offset.reset", "earliest");
        // Auto-commit interval (ms)
        props.put("auto.commit.interval.ms", "1000");
        // Deserializers for record keys and values
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // 2. Create the consumer instance
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<String, String>(props);
        // 3. Subscribe to the topic
        kafkaConsumer.subscribe(Arrays.asList("title"));
        while (true) {
            // 4. Poll a batch of records
            ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(1000);
            // Iterate over the batch and print each record
            for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                System.out.println("consumed value: " + consumerRecord.value() + "  partition: " + consumerRecord.partition());
            }
        }
    }
}
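
To verify the routing end to end, a hypothetical variant of the consumer can pin itself to a single partition with assign() instead of subscribe(); reading only partition 1 should print nothing but values 100-199. The class name PartitionOneConsumer and the group id below are assumptions:

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.Arrays;
import java.util.Properties;

public class PartitionOneConsumer {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        props.put("group.id", "test-partition-check");
        props.put("enable.auto.commit", "true");
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<String, String>(props);
        // assign() skips consumer-group rebalancing and reads exactly this partition
        kafkaConsumer.assign(Arrays.asList(new TopicPartition("title", 1)));
        while (true) {
            ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(1000);
            for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                // Should only print values in the range 100-199
                System.out.println("consumed value: " + consumerRecord.value() + "  partition: " + consumerRecord.partition());
            }
        }
    }
}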