Kafka生產者消費者java demo

在pom.xml加入:

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>1.0.1</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>1.0.1</version>
</dependency>

（說明：本示例的生產者與消費者只依賴 kafka-clients；kafka_2.11 是伺服器端/Scala 相關構件，僅撰寫客戶端程式時可以省略。）

公共屬性:

 

/**
 * Shared connection constants used by both the producer and consumer demos.
 */
public final class KafkaProperties {

    /** Kafka bootstrap server (host:port). Correctly-spelled name; prefer this in new code. */
    public static final String BROKER = "192.168.15.169:9092";

    /**
     * Alias kept under the original misspelled name ("BLOKER") for backward
     * compatibility with existing callers in this file.
     */
    public static final String BLOKER = BROKER;

    /** Topic the demo producer writes to and the demo consumers read from. */
    public static final String TOPIC = "jason";

    // Constants holder — not instantiable.
    private KafkaProperties() {
    }
}

生產者:

import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.log4j.Logger;

import java.util.Properties;

/**
 * Demo Kafka producer: asynchronously sends 100,000 small string messages to
 * {@link KafkaProperties#TOPIC} and reports each delivery result.
 *
 * @author jason
 * @since 2019-09-20
 */
public class Producer {

    private static final Logger log = Logger.getLogger(Producer.class);

    // One shared instance; KafkaProducer is safe for use from multiple threads.
    private static final KafkaProducer<String, String> producer;

    // Initialize the producer once at class-load time.
    static {
        producer = new KafkaProducer<String, String>(initConfig());
    }

    /**
     * Builds the minimal producer configuration: broker list plus String
     * serializers for both key and value.
     *
     * @return the producer {@link Properties}
     */
    private static Properties initConfig() {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaProperties.BLOKER);
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        return properties;
    }

    public static void main(String[] args) throws InterruptedException {
        try {
            for (int i = 0; i < 100000; i++) {
                // Payload is "value" + a random digit 0-9; records carry no key,
                // so the default partitioner spreads them across partitions.
                ProducerRecord<String, String> record =
                        new ProducerRecord<String, String>(KafkaProperties.TOPIC,
                                "value" + (int) (10 * Math.random()));
                // Asynchronous send; the callback runs on the producer's I/O thread.
                producer.send(record, new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                        if (null != e) {
                            // Fixed: was log.info with only e.getMessage() — failures
                            // must be logged at ERROR level with the full stack trace.
                            log.error("send error", e);
                        } else {
                            System.out.println(String.format("offset:%s,partition:%s",
                                    recordMetadata.offset(), recordMetadata.partition()));
                        }
                    }
                });
            }
        } finally {
            // close() flushes all buffered records before releasing resources,
            // and now runs even if the send loop throws.
            producer.close();
        }
    }
}

 

消費者:

 

package com.test.kafka;


import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.log4j.Logger;

import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * Demo Kafka consumer: three threads in the same consumer group ("test")
 * subscribe to {@link KafkaProperties#TOPIC}; Kafka balances the topic's
 * partitions across them and each thread prints the records it receives.
 *
 * @author jason
 * @since 2019-09-20
 */
public class Consumer implements Runnable {

    // Shared configuration; built once and not mutated afterwards.
    private static final Properties props;

    static {
        props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaProperties.BLOKER);
        // All instances share one group id, so they split the partitions between them.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
        // Offsets are committed automatically every second.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    }

    @Override
    public void run() {
        // KafkaConsumer is NOT thread-safe: each worker builds its own instance.
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        try {
            consumer.subscribe(Arrays.asList(KafkaProperties.TOPIC));
            // Poll forever; this demo is terminated externally (e.g. Ctrl-C).
            while (true) {
                // Fetch whatever records arrived, waiting at most 100 ms.
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("ThreadID:%s, offset = %d, key = %s, value = %s%n",
                            Thread.currentThread().getId(), record.offset(), record.key(), record.value());
                }
            }
        } finally {
            // Fixed: consumer was never closed — leave the group cleanly if the
            // loop ever exits (e.g. an exception from poll()).
            consumer.close();
        }
    }

    public static void main(String[] args) {
        ExecutorService executorService = Executors.newFixedThreadPool(3);
        executorService.submit(new Consumer());
        executorService.submit(new Consumer());
        executorService.submit(new Consumer());
        // Fixed: stop accepting new tasks; the submitted consumers keep polling
        // until the JVM is terminated.
        executorService.shutdown();
    }
}

 

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章