Springboot2整合Kafka

依賴

<!-- Spring for Apache Kafka starter; the version is managed by the Spring Boot parent BOM. -->
<dependency>
   <groupId>org.springframework.kafka</groupId>
   <artifactId>spring-kafka</artifactId>
</dependency>

配置

spring:
  kafka:
    # Broker address (replace with the public IP of the Kafka broker).
    bootstrap-servers: 外網ip:9092
    producer:
      # Producer-side retries; 0 means a failed send is not retried by the client.
      retries: 0
      # Max bytes per batch before it is sent (16 KB).
      batch-size: 16384
      # Total memory available for buffering unsent records (32 MB).
      buffer-memory: 33554432
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      # -1 (= "all"): wait for all in-sync replicas to acknowledge the write.
      acks: -1
    consumer:
      group-id: test
      # Start from the earliest offset when no committed offset exists for the group.
      auto-offset-reset: earliest
      # Offsets are committed manually by the listener, not by the client.
      enable-auto-commit: false
      # Auto-commit interval (ms); only relevant if enable-auto-commit were true.
      auto-commit-interval: 100
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # Cap the number of records returned by a single poll (batch size for the listener).
      max-poll-records: 10
    listener:
      # Number of listener container threads.
      concurrency: 3
      # Deliver records to listeners in batches.
      type: batch
      # Offsets are acknowledged manually via Acknowledgment.acknowledge().
      ack-mode: manual

配置類

@Configuration
@EnableKafka
public class KafkaConfig {

    @Autowired
    private KafkaProperties properties;

    /**
     * Consumer factory built from the application's Kafka properties.
     * <p>Declaring custom consumer factories disables Spring Boot's
     * auto-configured one, so the default factory is re-declared here
     * explicitly.
     */
    @Bean
    public ConsumerFactory<Object, Object> kafkaConsumerFactory() {
        return new DefaultKafkaConsumerFactory<>(properties.buildConsumerProperties());
    }

    /**
     * Consumer factory identical to the default one, except auto-commit is
     * forced off so offsets can be acknowledged manually by listeners.
     */
    @Bean
    public ConsumerFactory<Object, Object> kafkaManualConsumerFactory() {
        Map<String, Object> consumerProps = properties.buildConsumerProperties();
        consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        return new DefaultKafkaConsumerFactory<>(consumerProps);
    }

    /**
     * Listener container factory for manual offset commits (the consumer
     * factory it uses must have enable-auto-commit = false).
     */
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Integer, String>> kafkaManualAckListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> containerFactory =
                new ConcurrentKafkaListenerContainerFactory<>();
        containerFactory.setConsumerFactory(kafkaManualConsumerFactory());
        // Commit the offset immediately when the listener calls Acknowledgment.acknowledge().
        containerFactory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        // Deliver records to the listener in batches.
        containerFactory.setBatchListener(true);
        return containerFactory;
    }

    /**
     * Listener container factory for batch consumption with default commit
     * behavior.
     */
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Integer, String>> batchFactory() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> containerFactory =
                new ConcurrentKafkaListenerContainerFactory<>();
        containerFactory.setConsumerFactory(kafkaConsumerFactory());
        containerFactory.setBatchListener(true);
        return containerFactory;
    }
}

生產者

消息實體類

/**
 * Payload carried on the Kafka topic; the producer serializes it to JSON.
 * Field order is significant: it defines the {@code @AllArgsConstructor}
 * parameter order used by callers.
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
@ToString
public class Message {
    // Message identifier.
    private Long id;
    // Message body text.
    private String msg;
    // Creation timestamp. NOTE(review): java.util.Date kept because the
    // (id, msg, time) constructor signature is used by existing callers.
    private Date time;
}
@Component
@Slf4j
public class KafkaProducer {
    private static final String TOPIC = "pktest";
    @Autowired
    private KafkaTemplate<String,String> kafkaTemplate;

    @SuppressWarnings("unchecked")
    public void produce(Message message) {
        try {
            ListenableFuture future = kafkaTemplate.send(TOPIC, JSON.toJSONString(message));
            SuccessCallback<SendResult<String,String>> successCallback = new SuccessCallback<SendResult<String, String>>() {
                @Override
                public void onSuccess(@Nullable SendResult<String, String> result) {
                    log.info("發送消息成功");
                }
            };
            FailureCallback failureCallback = new FailureCallback() {
                @Override
                public void onFailure(Throwable ex) {
                    log.error("發送消息失敗",ex);
                    produce(message);
                }
            };
            future.addCallback(successCallback,failureCallback);
        } catch (Exception e) {
            log.error("發送消息異常",e);
        }
    }

    @Scheduled(fixedRate = 1000 * 10)
    public void send() {
        Message message = new Message(12L,"helloword",new Date());
        produce(message);
    }
}

消費者

@Component
@Slf4j
public class KafkaConsumer {

    /**
     * Batch listener for topic "pktest" using the manual-ack container
     * factory. The whole batch is logged first, then acknowledged exactly
     * once; if processing throws, no acknowledgment is made and the batch
     * is redelivered.
     */
    @KafkaListener(topics = "pktest", containerFactory = "kafkaManualAckListenerContainerFactory")
    public void consumer(ConsumerRecords<String, String> records, Acknowledgment ack) {
        try {
            records.partitions().stream()
                    .flatMap(partition -> records.records(partition).stream())
                    .forEach(record -> log.info("接收消息: offset = {}, key = {}, value = {} ",
                            record.offset(), record.key(), record.value()));
            // Acknowledge once for the whole batch. The original called
            // acknowledge() inside the per-record loop, which commits the
            // entire batch's offsets repeatedly — before processing finished.
            ack.acknowledge();
        } catch (Exception e) {
            log.error("kafka接收消息異常", e);
        }
    }
}
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章