Spring boot集成Kafka消息中間件

一.創建Spring boot項目,添加如下依賴

<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
    <groupId>org.projectlombok</groupId>
    <artifactId>lombok</artifactId>
    <optional>true</optional>
</dependency>
<!-- https://mvnrepository.com/artifact/org.springframework.kafka/spring-kafka -->
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
</dependency>
<!-- 1.2.83 fixes the autoType deserialization RCE vulnerabilities present in
     1.2.41 and earlier (e.g. CVE-2022-25845); do not use older 1.2.x releases. -->
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>fastjson</artifactId>
    <version>1.2.83</version>
</dependency>

二.配置文件

server.port=4400

# Kafka configuration
#============== kafka ===================
# Kafka broker address(es); multiple brokers may be comma-separated
spring.kafka.bootstrap-servers=192.168.102.88:9092
# Default consumer group id
spring.kafka.consumer.group-id=jkafka.demo
# earliest: if a partition has a committed offset, consume from it; otherwise start from the beginning
# latest:   if a partition has a committed offset, consume from it; otherwise consume only newly produced records
# none:     consume from committed offsets only if every partition has one; throw if any partition lacks one
spring.kafka.consumer.auto-offset-reset=latest
spring.kafka.consumer.enable-auto-commit=false
# NOTE(review): auto-commit-interval only applies when enable-auto-commit=true;
# kept for reference but currently inert.
spring.kafka.consumer.auto-commit-interval=100
# Key/value deserializers for consumer records.
# Bug fix: the key-deserializer line previously ended with a trailing space;
# in .properties files trailing whitespace is part of the value, so Kafka
# would fail to load the class ("...StringDeserializer ") at startup.
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer

三.編輯消息實體

/**
 * Serializable message payload exchanged over Kafka.
 * Lombok's {@code @Data} generates getters/setters, {@code equals}/{@code hashCode}
 * and {@code toString}; the producer serializes instances to JSON with fastjson.
 */
@Data
public class Message  implements Serializable{

    /**
     * Serial version for {@link Serializable} compatibility.
     */
    private static final long serialVersionUID = 2522280475099635810L;

    // Message id (producer fills this with a random UUID string)
    private String id;

    // Message content
    private String msg;

    // Time the message was sent
    private Date sendTime;

}

四.消息發送類

@Component
public class KfkaProducer {

    private static final Logger logger = LoggerFactory.getLogger(KfkaProducer.class);

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    /**
     * Serializes the message to JSON with fastjson and publishes it to the given topic.
     *
     * @param topic   Kafka topic to publish to
     * @param message payload to serialize and send
     */
    public void send(String topic, Message message) {
        // Serialize once and reuse the result for both sending and logging
        // (the original serialized the message twice).
        String payload = JSON.toJSONString(message);
        try {
            logger.info("正在發送消息...");
            kafkaTemplate.send(topic, payload);
            logger.info("發送消息成功 ----->>>>>  message = {}", payload);
        } catch (Exception e) {
            // Bug fix: the original called e.getMessage() and discarded the result,
            // silently swallowing the failure. Log it with the stack trace instead.
            logger.error("發送消息失敗 topic = {}, message = {}", topic, payload, e);
        }
    }
}

五.消息監聽接收類

@Component
public class KfkaListener {

    private static final Logger logger = LoggerFactory.getLogger(KfkaListener.class);

    /**
     * Consumes records from the "hello" topic and logs the record and its value.
     *
     * @param record the consumed record; its value may be null (tombstone), in
     *               which case nothing is logged
     */
    @KafkaListener(topics = {"hello"})
    public void listen(ConsumerRecord<?, ?> record) {
        // The original wrapped record.value() in Optional.ofNullable() only to call
        // isPresent()/get() — a plain null check expresses the same intent directly.
        Object message = record.value();
        if (message != null) {
            // Parameterized logging avoids string concatenation when the level is disabled.
            logger.info("接收消息------------ record ={}", record);
            logger.info("接收消息----------- message ={}", message);
        }
    }
}

六.定時發送信息測試類

@EnableScheduling
@Component
public class PublisherController {

    private static final Logger log = LoggerFactory.getLogger(PublisherController.class);

    @Autowired
    private KfkaProducer kfkaProducer;

    /**
     * Publishes a test message with a random id and the current timestamp to the
     * "hello" topic every 5 seconds.
     */
    @Scheduled(fixedRate = 5000)
    public void pubMsg() {
        Message msg = new Message();
        msg.setId(UUID.randomUUID().toString());
        msg.setMsg("發送這條消息給你,你好啊!!!!!!");
        msg.setSendTime(new Date());
        // Fixed: stray double semicolon after this call in the original.
        kfkaProducer.send("hello", msg);
        // Fixed typo in the log message ("sendes" -> "sends").
        log.info("Publisher sends Topic... ");
    }
}

七.測試結果
Spring boot集成Kafka消息中間件

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章