Kafka API Operations (Publish and Subscribe)

Message Publishing API

pom.xml

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.11.0.0</version>
</dependency>

producer.properties
Producer configuration; place this file under the resources directory.

# Kafka broker host:port list
bootstrap.servers=mini1:9092,mini2:9092,mini3:9092

# specify the compression codec for all data generated: none, gzip, snappy, lz4
compression.type=none

# wait for acknowledgement from all in-sync replicas (follower partitions
# replicate the message from the leader partition before the leader replies)
acks=all

# maximum number of retries for a failed send
retries=1

# batch size in bytes
batch.size=16384

# name of the partitioner class for partitioning events; default partition spreads data randomly
# this is a custom partitioner (the class itself is not shown in this post; a sketch
# follows after this file) -- comment the line out to fall back to the default partitioner
partitioner.class=Partition.Mypartition

# the maximum amount of time the client will wait for the response of a request
#request.timeout.ms=

# how long `KafkaProducer.send` and `KafkaProducer.partitionsFor` will block for
#max.block.ms=

# the producer will wait for up to the given delay to allow other records to be sent so that the sends can be batched together
linger.ms=1

# the maximum size of a request in bytes
#max.request.size=

# the default batch size in bytes when batching multiple records sent to a partition
#batch.size=

# the total bytes of memory the producer can use to buffer records waiting to be sent to the server
buffer.memory=33554432

# serializer class for message keys; IntegerSerializer means record keys must be
# Integers (the first demo below uses String keys and overrides this in code)
key.serializer=org.apache.kafka.common.serialization.IntegerSerializer

# serializer class for message values
value.serializer=org.apache.kafka.common.serialization.StringSerializer
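
The partitioner.class entry above points at a custom class that this post never shows. Below is a minimal sketch of what such a class could look like, assuming the package Partition and class name Mypartition taken from the config; it implements the 0.11 client's Partitioner interface. The hashing scheme is only an illustration, not the original author's implementation.

package Partition;

import java.util.Map;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

public class Mypartition implements Partitioner {
    // Illustrative scheme: route keyed records to a partition derived from the
    // key's hash, and send records without a key to partition 0.
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        int numPartitions = cluster.partitionsForTopic(topic).size();
        if (keyBytes == null) {
            return 0;
        }
        return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
    }

    public void close() {}

    public void configure(Map<String, ?> configs) {}
}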

consumer.properties

# list of brokers used for bootstrapping knowledge about the rest of the cluster
# format: host1:port1,host2:port2 ...
bootstrap.servers=mini1:9092,mini2:9092,mini3:9092
# consumer group id
group.id=test-consumer-group
# What to do when there is no initial offset in Kafka or if the current
# offset does not exist any more on the server: latest, earliest, none
#auto.offset.reset=
# whether to auto-commit offsets (the demos below commit manually and override this to false in code)
enable.auto.commit=true
# interval between automatic offset commits (only used when auto-commit is enabled)
auto.commit.interval.ms=500
# deserializer class for message keys
key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
# deserializer class for message values
value.deserializer=org.apache.kafka.common.serialization.StringDeserializer

First Demo

package api;

import org.apache.kafka.clients.producer.*;
import java.io.IOException;
import java.util.Properties;

public class CustomProducer {

    public static void main(String[] args) {
        Properties prop = new Properties();
        try {
            prop.load(CustomProducer.class.getClassLoader().getResourceAsStream("producer.properties"));
        } catch (IOException e) {
            e.printStackTrace();
        }
        // producer.properties configures IntegerSerializer for keys, but this demo
        // sends String keys, so override the key serializer here
        prop.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        Producer<String, String> producer = new KafkaProducer<String, String>(prop);
        for (int i = 0; i < 100; i++) {
            producer.send(new ProducerRecord<String, String>("first", i + "", i + "!"));
        }
        producer.close();
    }
}
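
send() returns a Future<RecordMetadata>, so the loop above is fire-and-forget: it never blocks on the broker. If you need confirmation that a record was written before moving on, call get() on the returned future. A minimal sketch of the loop body rewritten as a synchronous send (main would then declare throws ExecutionException, InterruptedException):

// Synchronous send: block until the broker acknowledges the record.
RecordMetadata meta = producer.send(
        new ProducerRecord<String, String>("first", i + "", i + "!")).get();
System.out.println("written to partition " + meta.partition() + " at offset " + meta.offset());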

Demo with a Callback

package api;

import org.apache.kafka.clients.producer.*;
import java.io.IOException;
import java.util.Properties;

public class CustomProducer1 {
    /**
     * The callback is invoked asynchronously when the producer receives the ack.
     * It takes two parameters, RecordMetadata and Exception: if the Exception is
     * null the send succeeded, otherwise it failed.
     * Note: failed sends are retried automatically (per the retries setting), so
     * there is no need to retry manually inside the callback.
     */
    public static void main(String[] args) {
        Properties prop = new Properties();
        try {
            prop.load(CustomProducer1.class.getClassLoader().getResourceAsStream("producer.properties"));
        } catch (IOException e) {
            e.printStackTrace();
        }

        Producer<Integer, String> producer = new KafkaProducer<Integer, String>(prop);
        for (int i = 0; i < 100; i++) {
            final ProducerRecord<Integer, String> record
                    = new ProducerRecord<Integer, String>("second", i, i + "!");
            producer.send(record, new Callback() {
                // invoked asynchronously once the producer receives the ack
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception == null) {
                        System.out.println("topic->" + metadata.topic() +
                                ",partition->" + metadata.partition() +
                                ",key->" + record.key());
                    } else {
                        exception.printStackTrace();
                    }
                }
            });
        }
        producer.close();
    }
}
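
On Java 8 and later, Callback has a single abstract method, so the anonymous class above can be written as a lambda; a sketch of the equivalent send call:

producer.send(record, (metadata, exception) -> {
    if (exception == null) {
        System.out.println("topic->" + metadata.topic() + ",partition->" + metadata.partition());
    } else {
        exception.printStackTrace();
    }
});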

Message Subscription API

package api;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.io.IOException;
import java.util.Arrays;
import java.util.Properties;

public class CustomConsumer {

    public static void main(String[] args) {
        Properties prop = new Properties();
        try {
            prop.load(CustomConsumer.class.getClassLoader().getResourceAsStream("consumer.properties"));
        } catch (IOException e) {
            e.printStackTrace();
        }
        // consumer.properties enables auto-commit; disable it here because this
        // demo commits offsets manually
        prop.put("enable.auto.commit", "false");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(prop);
        consumer.subscribe(Arrays.asList("first"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, key = %s, value = %s, partition = %s%n",
                        record.offset(),
                        record.key(),
                        record.value(),
                        record.partition()
                );
            }
            /**
             * There are two ways to commit offsets manually:
             * commitSync (synchronous) and commitAsync (asynchronous).
             * Both commit the highest offset of the batch returned by the latest poll.
             * The difference: commitSync retries until the commit succeeds (it can
             * still fail for unrecoverable reasons), while commitAsync has no retry
             * mechanism and may therefore fail.
             */
            consumer.commitSync();
        }
    }
}
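
Because commitAsync does not retry, it is worth attaching a callback so that failed commits are at least visible. A minimal sketch (this would additionally need imports for OffsetCommitCallback, OffsetAndMetadata, TopicPartition and java.util.Map):

// Asynchronous commit; the callback fires when the commit completes.
consumer.commitAsync(new OffsetCommitCallback() {
    public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        if (exception != null) {
            System.err.println("offset commit failed for " + offsets + ": " + exception);
        }
    }
});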

Consuming from the beginning of every partition
This uses the second parameter of subscribe, a ConsumerRebalanceListener:

package api;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Properties;

public class CustomConsumer1 {

    public static void main(String[] args) {
        Properties prop = new Properties();
        try {
            prop.load(CustomConsumer1.class.getClassLoader().getResourceAsStream("consumer.properties"));
        } catch (IOException e) {
            e.printStackTrace();
        }
        // disable auto-commit; this demo commits offsets manually
        prop.put("enable.auto.commit", "false");
        final KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(prop);
        // note: keys in "second" were written with IntegerSerializer, so the
        // StringDeserializer configured here will print them as raw bytes
        consumer.subscribe(Arrays.asList("first", "second"), new ConsumerRebalanceListener() {
            public void onPartitionsRevoked(Collection<TopicPartition> collection) {
            }
            // rewind every newly assigned partition to its beginning
            public void onPartitionsAssigned(Collection<TopicPartition> collection) {
                consumer.seekToBeginning(collection);
            }
        });
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("topic = %s, offset = %d, key = %s, value = %s, partition = %s%n",
                        record.topic(),
                        record.offset(),
                        record.key(),
                        record.value(),
                        record.partition()
                );
            }
            // exit once a poll returns nothing (this demo treats an empty poll as
            // having drained the topics; in general an empty poll can also just
            // mean no data arrived within the timeout)
            if (records.isEmpty()) {
                break;
            }
            consumer.commitSync();
        }
        consumer.close();
    }
}
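
If group coordination is not needed at all, an alternative not shown in the original post is to assign partitions explicitly and rewind them, which avoids waiting for a rebalance. A minimal sketch for the topic first (requires importing org.apache.kafka.common.PartitionInfo, java.util.ArrayList and java.util.List):

// Manual assignment: no consumer group rebalance is involved.
List<TopicPartition> partitions = new ArrayList<TopicPartition>();
for (PartitionInfo info : consumer.partitionsFor("first")) {
    partitions.add(new TopicPartition("first", info.partition()));
}
consumer.assign(partitions);
consumer.seekToBeginning(partitions);  // rewind every assigned partition to its log start offset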
Changing auto.offset.reset achieves something similar. Two caveats: with the 0.11 Java client the valid values are earliest, latest and none (smallest/largest belong to the old Scala consumer API), and the setting only takes effect when the group has no committed offset, whereas seekToBeginning always rewinds.

prop.put("auto.offset.reset", "earliest");  // also consume pre-existing messages; "latest" starts from newly produced ones only