概述
最近在使用kafka,需要往kafka中發送消息以及消費,這裏就複習下kafka api的基本使用情況
環境
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>2.1.6.RELEASE</version>
<relativePath/> <!-- lookup parent from repository -->
</parent>
<description>Demo project for Spring Boot</description>
<properties>
<java.version>1.8</java.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-freemarker</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>2.2.0.RELEASE</version>
</dependency>
<!--<dependency>-->
<!--<groupId>org.apache.kafka</groupId>-->
<!--<artifactId>kafka-streams</artifactId>-->
<!--<version>1.0.0</version>-->
<!--</dependency>-->
<!--<dependency>-->
<!--<groupId>org.apache.kafka</groupId>-->
<!--<artifactId>kafka-clients</artifactId>-->
<!--<version>1.1.0</version>-->
<!--</dependency>-->
</dependencies>
kafka java api
我們先來看看java api,然後再對比使用springboot整合
- KafkaConfig
獲取KafkaProducer 和KafkaConsumer實例,這裏寫成了單例模式
package com.example.springboot.kakfajavaapi;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import java.util.Properties;
/**
 * Lazily-initialized singleton holders for a KafkaProducer and a KafkaConsumer.
 *
 * <p>Thread-safety note: both fields MUST be {@code volatile} for the
 * double-checked locking below to be correct. Without volatile, the Java
 * memory model allows another thread to observe a reference to a
 * partially-constructed instance (the original code had this bug).
 */
public class KafkaConfig {

    private static volatile KafkaProducer<String, String> kafkaProducer;
    private static volatile KafkaConsumer<String, String> kafkaConsumer;

    /**
     * Returns the shared producer, creating it on first use (thread-safe).
     */
    public static KafkaProducer<String, String> getKafkaProducer() {
        if (null == kafkaProducer) {
            synchronized (KafkaConfig.class) {
                if (null == kafkaProducer) {
                    Properties props = new Properties();
                    // acks=1: leader acknowledgement only
                    props.put("acks", "1");
                    props.put("bootstrap.servers", "hadoop001:9092,hadoop002:9092,hadoop003:9092");
                    props.put("retries", 0);
                    props.put("batch.size", 16384);
                    props.put("linger.ms", 1);
                    props.put("buffer.memory", 33554432);
                    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
                    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
                    kafkaProducer = new KafkaProducer<>(props);
                }
            }
        }
        return kafkaProducer;
    }

    /**
     * Returns the shared consumer, creating it on first use (thread-safe).
     */
    public static KafkaConsumer<String, String> getKafkaConsumer() {
        if (null == kafkaConsumer) {
            synchronized (KafkaConfig.class) {
                if (null == kafkaConsumer) {
                    Properties props = new Properties();
                    props.put("bootstrap.servers", "hadoop001:9092,hadoop002:9092,hadoop003:9092");
                    // each consumer group gets its own group id
                    props.put("group.id", "testzy");
                    // commit offsets automatically
                    props.put("enable.auto.commit", "true");
                    // how often the consumed offsets are committed
                    props.put("auto.commit.interval.ms", "1000");
                    // session timeout after which the broker considers this consumer dead
                    props.put("session.timeout.ms", "30000");
                    // start from the earliest offset when there is no committed offset
                    props.put("auto.offset.reset", "earliest");
                    props.put("key.deserializer",
                            "org.apache.kafka.common.serialization.StringDeserializer");
                    props.put("value.deserializer",
                            "org.apache.kafka.common.serialization.StringDeserializer");
                    kafkaConsumer = new KafkaConsumer<String, String>(props);
                }
            }
        }
        return kafkaConsumer;
    }
}
- KafkaProducerTrain (生產者)
package com.example.springboot.kakfajavaapi;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
/**
 * Minimal producer demo: sends ten keyed string records to the "test"
 * topic via the shared singleton producer, then closes it.
 */
public class KafkaProducerTrain {

    private final String TOPIC = "test";

    /** Publishes records key_0..key_9 with values test0..test9 and closes the producer. */
    public void producer() {
        KafkaProducer<String, String> producer = KafkaConfig.getKafkaProducer();
        int count = 0;
        while (count < 10) {
            String recordKey = "key_" + count;
            String recordValue = "test" + count;
            producer.send(new ProducerRecord<String, String>(TOPIC, recordKey, recordValue));
            count++;
        }
        producer.close();
    }

    public static void main(String[] args) {
        new KafkaProducerTrain().producer();
    }
}
- KafkaConsumerTrain(消費者)
package com.example.springboot.kakfajavaapi;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.util.Arrays;
/**
 * Minimal consumer demo: subscribes to the "test" topic via the shared
 * singleton consumer and prints every record it receives, forever.
 */
public class KafkaConsumerTrain {

    private final String TOPIC = "test";

    /** Polls the topic in an endless loop, printing offset/key/value per record. */
    public void consumer() {
        KafkaConsumer<String, String> consumer = KafkaConfig.getKafkaConsumer();
        consumer.subscribe(Arrays.asList(TOPIC));
        for (;;) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            records.forEach(rec ->
                    System.out.printf("offset = %d, key = %s, value = %s", rec.offset(), rec.key(), rec.value() + "\n"));
        }
    }

    public static void main(String[] args) {
        new KafkaConsumerTrain().consumer();
    }
}
Spring boot 整合kafka
- KafkaProducer
package com.example.springboot.kafka.producer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import org.springframework.util.concurrent.ListenableFuture;
import java.util.UUID;
/**
 * Producer: publishes a random UUID message to topic "test" once per second.
 * {@code @EnableScheduling} turns on Spring's scheduled-task support.
 */
@Component
@EnableScheduling
public class KafkaProducer {

    @Autowired
    private KafkaTemplate kafkaTemplate;

    // The counter must be a field so it survives across scheduled invocations.
    // The original declared a local "int i = 0" inside send(), so "++i" was
    // always 1 and every record used the key "key_1".
    private final java.util.concurrent.atomic.AtomicInteger counter =
            new java.util.concurrent.atomic.AtomicInteger();

    /**
     * Scheduled send: fires once per second.
     */
    @Scheduled(cron = "00/1 * * * * ?")
    public void send() {
        String message = UUID.randomUUID().toString();
        // args: topic, partition (hard-coded 5 — the topic must have at least
        // 6 partitions or the send fails), key, value
        ListenableFuture future = kafkaTemplate.send("test", 5, "key_" + counter.incrementAndGet(), message);
        future.addCallback(o -> System.out.println("send-消息發送成功:" + message),
                throwable -> System.out.println("消息發送失敗:" + message));
    }
}
- KafkaConsumer
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;
import java.util.Optional;
/**
 * Consumer: {@code @KafkaListener} binds this method to a topic and consumer group.
 */
@Component
public class KafkaConsumer {

    // Fix: "topics" and "topicPattern" are mutually exclusive on @KafkaListener —
    // supplying both makes the listener container fail at startup. The original
    // also used containerGroup (a name for collecting listener containers) where
    // the Kafka consumer group id was intended; that is the groupId attribute.
    @KafkaListener(topics = {"test"}, groupId = "testzy")
    public void receive(ConsumerRecord<?, ?> record) {
        // record.value() may be null (e.g. tombstone records), hence the Optional guard
        Optional<?> kafkaMessage = Optional.ofNullable(record.value());
        if (kafkaMessage.isPresent()) {
            Object message = kafkaMessage.get();
            System.out.printf("offset = %d, key = %s, value = %s, partition = %s", record.offset(), record.key(), record.value(), record.partition() + "\n");
        }
    }
}
看完springboot整合kafka,大家會感覺這樣太簡單了;簡單的原因是底層通過kafkaTemplate以及@KafkaListener替我們處理了細節,所以我們更應該去學習底層原理.
kafka 常用命令
- 創建topic
kafka-topics --create --zookeeper localhost:2181 --replication-factor 3 --partitions 10 --topic test
- 查詢topic列表
kafka-topics --list --zookeeper localhost:2181/kafka
- 查看topic詳情
kafka-topics --describe --zookeeper localhost:2181/kafka --topic test
- 查看有哪些消費者組
kafka-consumer-groups --zookeeper localhost:2181/kafka --list
- 查看kafka某個消費組偏移量
舊版本:
kafka-consumer-groups --zookeeper localhost:2181/kafka --group groupid --describe
新版本
kafka-consumer-groups --new-consumer --bootstrap-server localhost:9092 --describe --group groupid
- 查看某一個topic對應的消息數量
kafka-run-class kafka.tools.GetOffsetShell --broker-list localhost:9092/kafka --topic TAG-EXCHANGE-INSTANCE-HUMAN-TOPIC --time -1
- 啓動生產者
kafka-console-producer --topic test --broker-list localhost:9092
- 啓動消費者
kafka-console-consumer --topic test --zookeeper localhost:2181/kafka