Java api對kafka讀寫消息實例

Java 調用 kafka (kafka版本:kafka_2.11-0.11.0.1)的實例:

測試結論:

1、consumer.assign()手動分配partition,消費者的group.id不起作用,比較靈活,可以指定一個消費者監聽不同topic的不同partition。

consumer1 —》 (topic1.partition2, topic2.partition3, topic0.partition0)

consumer0 —》 (topic1.partition1, topic2.partition1, topic0.partition3)

2、consumer.subscribe()自動分配partition, 指定消費者的group.id。

當不同消費者處於不同的group.id中時,按照kafka的默認配置,各個消費組互相獨立,每個消費組都能消費到topic的全部消息(廣播語義;若使用auto.offset.reset=latest,後啓動的組只能收到啓動之後的新消息)。

當不同消費者處於相同的group.id中時,按照kafka的默認配置,不同的消費者會被分配到零個或多個partition。

 

生產者代碼:

public class Producer {

    private final KafkaProducer<String, String> producer;

    public final static String TOPIC = "jiqimao";

    private Producer() {
        producer = KafkaAdminUtils.createProducer();
    }

    /**
     * Sends a fixed batch of 9 messages to {@link #TOPIC}, cycling through
     * partitions 0..2 (partition = messageNo % 3) with the message number as key.
     * The key type is Integer to match the IntegerSerializer configured in
     * {@code KafkaAdminUtils.createProducer()}.
     */
    public void produce() {
        int messageNo = 0;
        final int messageNum = 9;

        try {
            while (messageNo < messageNum) {
                String data = String.format("%s, message %s.", LocalTime.now(), messageNo);

                producer.send(new ProducerRecord(TOPIC, messageNo % 3, messageNo, data), new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                        // BUG FIX: on a failed send, recordMetadata is null and e is
                        // non-null; the original dereferenced recordMetadata
                        // unconditionally, producing an NPE inside the callback.
                        if (e != null) {
                            e.printStackTrace();
                        } else {
                            System.out.println("回調信息 --> offset = " + recordMetadata.offset() + ", partition = " + recordMetadata.partition());
                        }
                    }
                });

                messageNo++;
            }
        } finally {
            // Ensure buffered records are flushed and the producer is released
            // even if send() throws (original leaked the producer on exception).
            producer.close();
        }
    }

    public static void main(String[] args) {
        KafkaAdminUtils.init();

        new Producer().produce();
    }
}

 

消費者代碼:

public class Consumer implements Runnable {

    // Identifier used only in log output; distinguishes concurrent consumers.
    private final int no;
    // Parameterized type replaces the original raw KafkaConsumer declaration.
    private final KafkaConsumer<String, String> consumer;

    public final static List<String> TOPIC = Arrays.asList("jiqimao");

    private Consumer(int no) {
        // All consumers share group.id "group-1", so partitions of the topic are
        // divided among them by the group coordinator.
        consumer = KafkaAdminUtils.createConsumer(TOPIC, "group-" + no, "group-1", no);
        this.no = no;
    }

    @Override
    public void run() {
        this.consume();
    }

    /**
     * Endless poll loop: blocks up to 1000 ms per poll and prints
     * topic/partition/offset/value for every record received.
     */
    public void consume() {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(1000);
            System.out.printf("Consumer %d start consume.%n", no);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("consumer: " + no
                        + ", topic = " + record.topic()
                        + ", partition = " + record.partition()
                        + ", offset = " + record.offset()
                        + ", value = " + record.value());
            }
        }
    }

    public static void main(String[] args) {
        KafkaAdminUtils.init();

        // Two consumers in the same consumer group: the topic's partitions are
        // split between them (each gets zero or more partitions).
        new Thread(new Consumer(0)).start();
        new Thread(new Consumer(1)).start();
    }
}

 

Kafka工具類:

public class KafkaAdminUtils {

    private static final String topic = "jiqimao";

    private static AdminClient admin;

    /** Lazily creates the shared AdminClient against localhost:9092. Not thread-safe. */
    public static void initClient() {
        if (admin == null) {
            Properties props = new Properties();
            props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            admin = AdminClient.create(props);
        }
    }

    /** Initializes the admin client and prints the current topics and their partition layout. */
    public static void init() {
        try {
            initClient();
            listTopics();
            showTopicsDetail(Arrays.asList(topic));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Builds a producer with Integer keys / String values.
     * BUG FIX: key.serializer is IntegerSerializer, so the key type parameter must
     * be Integer — the original declared KafkaProducer&lt;String, String&gt;, which
     * contradicted the configured serializer. The (raw) return type is unchanged.
     */
    public static KafkaProducer createProducer() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "localhost:9092"); // broker address
        properties.put("acks", "all");          // commit only after the full ISR has acknowledged
        properties.put("retries", 0);           // no automatic resend on transient failure
        properties.put("batch.size", 16384);    // max batch size in bytes; a full batch sends immediately
        properties.put("linger.ms", 1);         // wait up to 1 ms to allow batching of small records
        properties.put("buffer.memory", 33554432); // total memory for buffering unsent records
        properties.put("key.serializer",
                "org.apache.kafka.common.serialization.IntegerSerializer");
        properties.put("value.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");

        return new KafkaProducer<Integer, String>(properties);
    }

    /**
     * Builds a consumer subscribed to the given topics in the given group.
     *
     * @param topics    topics to subscribe to (BUG FIX: the original ignored this
     *                  parameter and always subscribed to the static {@code topic})
     * @param groupName informational label only — "group.name" is not a standard
     *                  Kafka consumer config and is ignored by the client
     * @param groupId   consumer group id used for partition assignment
     * @param partition partition index; only relevant for the (disabled) manual
     *                  assign() variant below
     */
    public static KafkaConsumer createConsumer(List<String> topics, String groupName, String groupId, int partition) {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "localhost:9092");
        properties.put("group.id", groupId);
        properties.put("group.name", groupName); // NOTE: unknown config, kept for log visibility only
        properties.put("enable.auto.commit", "true");
        properties.put("auto.commit.interval.ms", "1000");
        properties.put("auto.offset.reset", "latest");
        properties.put("session.timeout.ms", "30000");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        consumer.subscribe(topics);                                                 // 自動分配partition
//        consumer.assign(Arrays.asList(new TopicPartition(topic, partition)));     // 手動分配partition
        return consumer;
    }

    /** Creates each topic with 3 partitions and replication factor 1. */
    public static void createTopic(List<String> topics) {
        List<NewTopic> newTopicList = new ArrayList<>();
        topics.forEach(topic -> newTopicList.add(new NewTopic(topic, 3, (short) 1)));
        admin.createTopics(newTopicList);
    }

    /** Deletes the given topics, waiting up to 10 s for the operation to complete. */
    public static void deleteTopics(List<String> topics) throws InterruptedException, ExecutionException, TimeoutException {
        admin.deleteTopics(topics).all().get(10, TimeUnit.SECONDS);
    }

    /** Lists all topic names on the broker, printing and returning them. */
    public static Collection<String> listTopics() throws InterruptedException, ExecutionException {
        Collection<String> list = admin.listTopics().listings().get().stream()
                .map(TopicListing::name)
                .collect(Collectors.toList());
        System.out.println(list);
        return list;
    }

    /** Prints the partition layout of each given topic. */
    public static void showTopicsDetail(List<String> topics) throws InterruptedException, ExecutionException {
        admin.describeTopics(topics).all().get().forEach((topic, description) -> {
            System.out.println("==== Topic " + topic + " Begin ====");
            for (TopicPartitionInfo partition : description.partitions()) {
                System.out.println(partition.toString());
            }
            System.out.println("==== Topic " + topic + " End ====");
        });
    }
}
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章