Common commands:
Create a topic
bin/kafka-topics.sh --create --zookeeper node01:2181,node02:2181,node03:2181 --replication-factor 2 --partitions 3 --topic 18BD12-1
List topics
bin/kafka-topics.sh --list --zookeeper node01:2181,node02:2181,node03:2181
Describe a topic's structure
bin/kafka-topics.sh --describe --zookeeper node01:2181,node02:2181,node03:2181 --topic 18BD12-1
Simulate a producer writing data
bin/kafka-console-producer.sh --broker-list node01:9092,node02:9092,node03:9092 --topic 18BD12
Change the number of partitions
bin/kafka-topics.sh --zookeeper node01:2181,node02:2181,node03:2181 --alter --topic 18BD12 --partitions 4
Simulate a consumer reading data
bin/kafka-console-consumer.sh --from-beginning --topic 18BD12 --zookeeper node01:2181,node02:2181,node03:2181
Add a topic-level config
bin/kafka-topics.sh --zookeeper node01:2181 --alter --topic test --config flush.messages=1
Remove a topic-level config
bin/kafka-topics.sh --zookeeper node01:2181 --alter --topic test --delete-config flush.messages
Delete a topic
bin/kafka-topics.sh --zookeeper zkhost:port --delete --topic topicName
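The same topic administration can also be done from Java. Below is a minimal sketch using the AdminClient API from the kafka-clients library (available since Kafka 0.11); the package and class names are illustrative, and the topic settings mirror the create command above.
package com.czxy.admin;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import java.util.Collections;
import java.util.Properties;
/**
 * Sketch: create and list topics through the AdminClient API
 * instead of the shell scripts.
 */
public class TopicAdmin {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "node01:9092,node02:9092,node03:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            // 3 partitions, replication factor 2, as in the CLI example
            NewTopic topic = new NewTopic("18BD12-1", 3, (short) 2);
            admin.createTopics(Collections.singleton(topic)).all().get();
            // Print all topic names, like kafka-topics.sh --list
            admin.listTopics().names().get().forEach(System.out::println);
        }
    }
}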
Java API usage:
Producer
package com.czxy.producer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;
/**
 * Produces data to the Kafka cluster
*/
public class Producer1 {
public static void main(String[] args) {
// 1. Configure the Kafka cluster connection
Properties props = new Properties();
// Kafka broker addresses
props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
// Acknowledgement mode: "all" waits for all in-sync replicas
props.put("acks", "all");
// Retries on send failure
props.put("retries", 0);
// Maximum batch size in bytes
props.put("batch.size", 16384);
// How long to wait before sending a batch (ms)
props.put("linger.ms", 1);
// Total memory available for buffering unsent records
props.put("buffer.memory", 33554432);
// Key and value serializers
props.put("key.serializer",
"org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer",
"org.apache.kafka.common.serialization.StringSerializer");
// Custom partitioner; a sketch of this class follows after the producer code
// props.put("partitioner.class", "Producer.ProducerPartition");
// 2. Create a producer instance
KafkaProducer<String, String> kafkaProducer = new KafkaProducer<String, String>(props);
// 3. Send data to the Kafka cluster through the producer
for (int i = 0; i < 10; i++) {
// 1. No partition number and no key: records are distributed round-robin
ProducerRecord<String, String> producerRecord = new ProducerRecord<>("18BD12_1", "JAV2AAPi" + i);
// 2. No partition number but a key: the target partition is hash(key) % partition count
//ProducerRecord<String, String> producerRecord = new ProducerRecord<>("18BD12_1", "key", "JAV2AAPi" + i);
// 3. Explicit partition number: every record goes to that partition
//ProducerRecord<String, String> producerRecord = new ProducerRecord<>("18BD12_1", 1, "", "JAV2AAPi" + i);
// 4. Custom partitioning strategy: enable the partitioner.class setting above; the record then carries no explicit partition
//ProducerRecord<String, String> producerRecord = new ProducerRecord<>("18BD12_1", "key", "JAV2AAPi" + i);
kafkaProducer.send(producerRecord);
}
// Close the producer
kafkaProducer.close();
}
}
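The commented-out partitioner.class setting above points at a user-defined partitioner (strategy 4). A minimal sketch of such a class follows; the package and class name match the commented config string, and the hash-of-value routing rule is purely illustrative.
package Producer;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import java.util.Map;
/**
 * Sketch of a custom partitioner for the commented-out
 * partitioner.class config above. The routing rule is illustrative.
 */
public class ProducerPartition implements Partitioner {
    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        int numPartitions = cluster.partitionCountForTopic(topic);
        // Example rule: spread records by a non-negative hash of their value
        return (value.hashCode() & Integer.MAX_VALUE) % numPartitions;
    }
    @Override
    public void close() { }
    @Override
    public void configure(Map<String, ?> configs) { }
}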
Consumer
Automatic offset commit
package com.czxy.consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.util.Arrays;
import java.util.Properties;
/**
 * Consumer that commits offsets automatically
*/
public class Consumer1 {
public static void main(String[] args) {
// 1. Build the configuration
Properties props = new Properties();
// Kafka broker addresses
props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
// Consumer group id
props.put("group.id", "test");
// The next two lines enable automatic offset commits
props.put("enable.auto.commit", "true");
// Auto-commit interval
props.put("auto.commit.interval.ms", "1000");
// Key and value deserializers
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
// 2. Create a consumer instance
KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(props);
// 3. Subscribe to the topic to read
kafkaConsumer.subscribe(Arrays.asList("18BD12_1"));
// Poll in a loop
while (true) {
// 4. Pull a batch of records and print them
ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(1000);
// Iterate over the batch, one record at a time
for (ConsumerRecord<String, String> record : consumerRecords) {
String value = record.value();
System.out.println(value);
}
}
}
}
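The polling loop above never exits, so the consumer is never closed cleanly. A common pattern, shown here as an assumed addition rather than part of the original notes, is to call wakeup() from a shutdown hook and catch the resulting WakeupException:
package com.czxy.consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
import java.util.Arrays;
import java.util.Properties;
/**
 * Same consumer as Consumer1, plus a clean shutdown path.
 */
public class Consumer1Shutdown {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        props.put("group.id", "test");
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("18BD12_1"));
        final Thread mainThread = Thread.currentThread();
        // On Ctrl+C: interrupt the blocked poll(), then wait for main to finish
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            consumer.wakeup();
            try { mainThread.join(); } catch (InterruptedException ignored) { }
        }));
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(1000);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.value());
                }
            }
        } catch (WakeupException e) {
            // Expected: poll() was interrupted by wakeup() during shutdown
        } finally {
            consumer.close(); // leaves the group and commits final offsets
        }
    }
}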
Manual offset commit
package com.czxy.consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.util.Arrays;
import java.util.Properties;
/**
 * Consumer that commits offsets manually
*/
public class Consumer2 {
public static void main(String[] args) {
// 1. Build the configuration
Properties props = new Properties();
// Kafka broker addresses
props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
// Consumer group id
props.put("group.id", "test1");
// Disable automatic offset commits; offsets are committed manually below
props.put("enable.auto.commit", "false");
// Auto-commit interval (unused when auto-commit is off)
// props.put("auto.commit.interval.ms", "1000");
// Key and value deserializers
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
// 2. Create a consumer instance
KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(props);
// 3. Subscribe to the topic
kafkaConsumer.subscribe(Arrays.asList("18BD12_1"));
// Poll in a loop
while (true){
// 4. Pull data and print it
ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(1000);
for (ConsumerRecord<String, String> record : consumerRecords) {
System.out.println(record.value());
}
// Manually commit offsets (asynchronous; see the callback sketch after this class)
kafkaConsumer.commitAsync();
}
}
}
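Note that commitAsync() does not retry failed commits. To at least observe failures, it can be given an OffsetCommitCallback. A drop-in sketch for the call above; the logging behaviour is an assumption, not from the original notes.
// Replaces the plain kafkaConsumer.commitAsync() call above. Extra imports:
// org.apache.kafka.clients.consumer.OffsetAndMetadata,
// org.apache.kafka.clients.consumer.OffsetCommitCallback,
// org.apache.kafka.common.TopicPartition, java.util.Map
kafkaConsumer.commitAsync(new OffsetCommitCallback() {
    @Override
    public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        if (exception != null) {
            // Async commits are not retried, so at minimum record the failure
            System.err.println("offset commit failed: " + exception.getMessage());
        }
    }
});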
Reading partition by partition
package com.czxy.consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.Set;
/*
Consume records partition by partition
*/
public class ConsumerPartition {
public static void main(String[] args) {
// 1. Build the configuration
Properties props = new Properties();
// Kafka broker addresses
props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
// Consumer group id
props.put("group.id", "test1");
// Disable automatic offset commits
props.put("enable.auto.commit", "false");
// Auto-commit interval (unused when auto-commit is off)
// props.put("auto.commit.interval.ms", "1000");
// Key and value deserializers
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
// 2. Create a consumer instance
KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(props);
// 3. Subscribe to the topic
kafkaConsumer.subscribe(Arrays.asList("18BD12_1"));
while (true) {
// 4. Pull a batch of records, then read it partition by partition
ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(1000);
// All partitions present in this batch
Set<TopicPartition> partitions = consumerRecords.partitions();
// Iterate over the partitions
for (TopicPartition partition : partitions) {
// Records belonging to this partition
List<ConsumerRecord<String, String>> records = consumerRecords.records(partition);
// Iterate over this partition's records, one at a time
for (ConsumerRecord<String, String> record : records) {
System.out.println(record.value());
}
// 5. Manually commit after finishing each partition (note: the no-argument
// commitAsync() commits the whole polled batch; a strict per-partition commit is sketched after this class)
kafkaConsumer.commitAsync();
}
}
}
}
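As noted in step 5, the no-argument commitAsync() commits offsets for everything returned by the last poll(), not just the current partition. To commit strictly per partition, pass the exact offsets to commitSync(). A minimal sketch of the partition loop, assuming the same consumerRecords batch as above:
// Extra imports: org.apache.kafka.clients.consumer.OffsetAndMetadata, java.util.Collections
for (TopicPartition partition : consumerRecords.partitions()) {
    List<ConsumerRecord<String, String>> records = consumerRecords.records(partition);
    for (ConsumerRecord<String, String> record : records) {
        System.out.println(record.value());
    }
    // The committed offset must be the last consumed offset + 1
    long lastOffset = records.get(records.size() - 1).offset();
    kafkaConsumer.commitSync(Collections.singletonMap(
            partition, new OffsetAndMetadata(lastOffset + 1)));
}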
Consuming specified partitions
package Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.Set;
/*
Consume data from the specified partitions
*/
public class ConsumerMyPartition {
public static void main(String[] args){
// 1. Build the configuration
Properties props = new Properties();
// Kafka broker addresses
props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
// Consumer group id
props.put("group.id", "test1");
// Disable automatic offset commits
props.put("enable.auto.commit", "false");
// Auto-commit interval (unused when auto-commit is off)
//props.put("auto.commit.interval.ms", "1000");
// Key and value deserializers
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
// 2. Create a consumer instance
KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<String, String>(props);
// 3. Assign specific partitions instead of subscribing to the whole topic
TopicPartition topicPartition0 = new TopicPartition("18BD12", 0);
TopicPartition topicPartition1 = new TopicPartition("18BD12", 1);
kafkaConsumer.assign(Arrays.asList(topicPartition0, topicPartition1));
while (true){
// 4. Pull data and print it
ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(1000);
// The partitions present in this batch (0 and 1)
Set<TopicPartition> partitions = consumerRecords.partitions();
// Iterate over the partitions
for (TopicPartition partition : partitions) {
// Records belonging to this partition
List<ConsumerRecord<String, String>> records = consumerRecords.records(partition);
// Iterate over this partition's records, one at a time
for (ConsumerRecord<String, String> record : records) {
System.out.println(record.value()+" "+record.partition());
}
// Manually commit offsets (synchronous)
kafkaConsumer.commitSync();
}
}
}
}
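Since assign() bypasses the consumer group's partition balancing, it combines naturally with seek() for replaying a partition from a chosen position. A minimal sketch; the offset value is illustrative:
// Replay partition 0 of topic 18BD12; seek() only works on assigned partitions.
TopicPartition p0 = new TopicPartition("18BD12", 0);
kafkaConsumer.assign(Arrays.asList(p0));
kafkaConsumer.seekToBeginning(Arrays.asList(p0)); // or kafkaConsumer.seek(p0, 42L) for a fixed offset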