消費 Kafka 數據:將「處理消息」模塊改成多線程的實現方式(單線程 poll,線程池處理)。

package application;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/**
 *多線程消費實現方式,就是在處理消息模塊改成多線程的實現方式
 * */
/**
 * Multi-threaded Kafka consumption: one consumer thread performs poll(), and
 * the message-processing step is handed off to a thread pool. Offsets are
 * committed manually from the polling thread based on what the workers have
 * finished.
 *
 * <p>NOTE(review): this pattern can still skip records on failure — if a
 * worker processing an earlier batch dies after a later batch's offset has
 * already been committed, the earlier records are never redelivered. That
 * limitation is inherent to the design and is not fixed here.
 */
public class ThirdMultiConsumerThreadDemo {

    public static final String brokers = "localhost:9092";
    public static final String topic = "topic-spark";
    public static final String groupId = "group-spark";

    // Shared between the polling thread and all RecordsHandler workers: maps
    // each partition to the next offset to commit. Eagerly initialized — the
    // original declared it without assigning, so the first
    // synchronized(offsets) threw a NullPointerException.
    public static final Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();

    /**
     * Builds the consumer configuration.
     *
     * @return properties with String deserializers and auto-commit disabled
     */
    public static Properties initConfig() {
        Properties props = new Properties();
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        // The run() loop commits manually via commitSync(offsets); auto-commit
        // must therefore be OFF. The original set it to true, which races with
        // — and defeats — the manual commits.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        return props;
    }

    public static void main(String[] args) {
        Properties props = initConfig();
        KafkaConsumerThread consumerThread = new KafkaConsumerThread(
                props,
                topic,
                Runtime.getRuntime().availableProcessors());
        consumerThread.start();
    }

    /** Single polling thread; hands each non-empty batch to the worker pool. */
    public static class KafkaConsumerThread extends Thread {
        private final KafkaConsumer<String, String> kafkaConsumer;
        private final ExecutorService executorService;
        private final int threadNumber;

        public KafkaConsumerThread(Properties props, String topic, int threadNumber) {
            kafkaConsumer = new KafkaConsumer<>(props);
            kafkaConsumer.subscribe(Collections.singletonList(topic));
            this.threadNumber = threadNumber;
            executorService = new ThreadPoolExecutor(
                    threadNumber,
                    threadNumber,
                    0L,
                    // Keep-alive unit; the original wrote MICROSECONDS by
                    // mistake (value is 0, so behavior is unchanged).
                    TimeUnit.MILLISECONDS,
                    new ArrayBlockingQueue<>(100),
                    // CallerRunsPolicy: when the queue is full, the polling
                    // thread runs the task itself, throttling poll() so the
                    // pool cannot be overwhelmed by the rate of polled records.
                    new ThreadPoolExecutor.CallerRunsPolicy());
        }

        @Override
        public void run() {
            try {
                while (true) {
                    // The original used Duration.ofMinutes(100), blocking up to
                    // 100 minutes per poll; 100 ms is the intended timeout.
                    ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
                    if (!records.isEmpty()) {
                        executorService.submit(new RecordsHandler(records));
                    }

                    // Commit whatever offsets the workers have published so
                    // far, then clear the map so each offset is committed once.
                    synchronized (offsets) {
                        if (!offsets.isEmpty()) {
                            kafkaConsumer.commitSync(offsets);
                            offsets.clear();
                        }
                    }
                }
            } catch (Exception e) {
                e.printStackTrace();
            } finally {
                kafkaConsumer.close();
            }
        }
    }

    /**
     * Processes one polled batch on a pool thread and records, per partition,
     * the next offset to commit into the shared {@code offsets} map.
     * Implements Runnable — the original extended Thread, which is misleading
     * for a task that is only ever submitted to an executor, never started as
     * a thread.
     */
    public static class RecordsHandler implements Runnable {
        public final ConsumerRecords<String, String> records;

        public RecordsHandler(ConsumerRecords<String, String> records) {
            this.records = records;
        }

        @Override
        public void run() {
            for (TopicPartition tp : records.partitions()) {
                List<ConsumerRecord<String, String>> tpRecords = records.records(tp);
                // Business processing of tpRecords goes here.

                long lastConsumedOffset = tpRecords.get(tpRecords.size() - 1).offset();
                // Only ever move the pending commit position forward; the lock
                // guards against concurrent workers racing on the same
                // partition and against the poller's commit-and-clear.
                synchronized (offsets) {
                    OffsetAndMetadata current = offsets.get(tp);
                    if (current == null || current.offset() < lastConsumedOffset + 1) {
                        offsets.put(tp, new OffsetAndMetadata(lastConsumedOffset + 1));
                    }
                }
            }
        }
    }

}

該代碼仍未解決異常情況下的位移覆蓋問題:若處理較早批次的工作線程在較晚批次的位移已被提交之後才失敗,該較早批次的消息不會被重新投遞,可能造成消息丟失。

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章