Kafka實戰——在SpringBoot中的應用
1. pom引用
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
</dependency>
2. YML配置
server:
  port: 8081
spring:
  kafka:
    bootstrap-servers: 10.1.48.214:9092,10.1.48.214:9093,10.1.48.214:9094
    producer: # producer settings
      retries: 3 # a value > 0 makes the client resend records whose send failed
      batch-size: 16384
      buffer-memory: 33554432
      acks: 1
      # serializers for the message key and the message value
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    consumer:
      group-id: default-group
      enable-auto-commit: false
      auto-offset-reset: earliest
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      max-poll-records: 500
    listener:
      # RECORD: commit after each record is handled by the listener (ListenerConsumer)
      # BATCH: commit after each poll() batch is handled by the listener
      # TIME: commit after a batch is handled AND more than TIME has elapsed since the last commit
      # COUNT: commit after a batch is handled AND at least COUNT records have been processed
      # COUNT_TIME: commit when either the TIME or the COUNT condition is met
      # MANUAL: commit (queued) after Acknowledgment.acknowledge() is called
      # MANUAL_IMMEDIATE: commit immediately when Acknowledgment.acknowledge() is called
      #   — the mode most commonly used with manual acknowledgment
      ack-mode: MANUAL_IMMEDIATE
  redis:
    host: 10.1.48.214
3. 生產者
package com.example.kafkaspringboot.controller;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

/**
 * REST endpoint that publishes a demo message to a Kafka topic.
 */
@RestController
public class KafkaController {

    /** Topic the demo message is sent to. */
    private static final String TOPIC_NAME = "my-replicated-topic";

    private final KafkaTemplate<String, String> kafkaTemplate;

    // Constructor injection instead of field injection: the dependency is
    // explicit and final, and the class is testable without a Spring context.
    @Autowired
    public KafkaController(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    /**
     * Sends a fixed message with key {@code "key"} to partition 0 of
     * {@value #TOPIC_NAME}.
     */
    @RequestMapping("/send")
    public void send() {
        // send(topic, partition, key, value). NOTE(review): the returned send
        // future is ignored, so broker-side failures are silent — attach a
        // callback / whenComplete handler in production code.
        kafkaTemplate.send(TOPIC_NAME, 0, "key", "this is a msg");
    }
}
4. 消費者
package com.example.kafkaspringboot.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.PartitionOffset;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

/**
 * Kafka consumer samples using manual offset commit.
 *
 * <p>Requires {@code spring.kafka.listener.ack-mode} set to MANUAL or
 * MANUAL_IMMEDIATE and {@code enable-auto-commit: false}.
 */
@Component
public class MyConsumer {

    /**
     * 1. Consumes a single topic with a dedicated consumer group and commits
     * each record's offset manually.
     */
    @KafkaListener(topics = "my-replicated-topic", groupId = "MyGroup1")
    public void listenGroup(ConsumerRecord<String, String> record, Acknowledgment ack) {
        String value = record.value();
        System.out.println(value);
        System.out.println(record);
        // Manually commit the offset; without this call (under MANUAL_IMMEDIATE)
        // the record is redelivered after a rebalance or restart.
        ack.acknowledge();
    }

    // 2. Consumer group with multiple topics, specific partitions, an initial
    //    offset, and a concurrency setting. `concurrency` is the number of
    //    consumers started for this group (i.e. parallel consumers) — keep it
    //    less than or equal to the total partition count.
    // @KafkaListener(groupId = "testGroup", topicPartitions = {
    //         @TopicPartition(topic = "topic1", partitions = {"0", "1"}),
    //         @TopicPartition(topic = "topic2", partitions = "0",
    //                 partitionOffsets = @PartitionOffset(partition = "1", initialOffset = "100"))
    // }, concurrency = "3")
    // public void listenGroupPro(ConsumerRecord<String, String> record, Acknowledgment ack) {
    //     System.out.println(record.value());
    //     System.out.println(record);
    //     // Manually commit the offset.
    //     ack.acknowledge();
    // }

    // 3. Batch consumption: the listener receives the whole poll() batch.
    //    NOTE(review): the original sample dereferenced a nonexistent single
    //    `record` variable; a ConsumerRecords batch must be iterated, and it
    //    also needs `import org.apache.kafka.clients.consumer.ConsumerRecords;`.
    /*
    @KafkaListener(topics = "my-replicated-topic", groupId = "MyGroup2")
    public void listensGroup(ConsumerRecords<String, String> records, Acknowledgment ack) {
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.value());
            System.out.println(record);
        }
        // Commit once for the whole batch.
        ack.acknowledge();
    }
    */
}
5. 學習來源
嗶哩嗶哩:https://www.bilibili.com/video/BV1Xy4y1G7zA