kafka僞動態topic消費

ps:業務需求,kafka消費的topic是動態的(就是使用中改變),網上搜了許多也不理想,然後自己突發奇想模擬了一個。
原理:1、獨立kafka消費模塊爲一個單獨的jar文件
2、另起一個系統,定時器每五分鐘查詢數據庫一次,發現topic改變後就用java調用linux命令殺掉kafka 的jar進程,並重新啓動該jar,使其重新讀取最新的topic列表

代碼如下:

/**
 * Scheduled job: every five minutes, asks the service layer to compare the
 * topic subscriptions stored in the database against the cached snapshot and
 * restart the consumer jar if they changed.
 */
@Component
@Slf4j
public class ConsumerJob {
    @Autowired
    IComService comService;

    /**
     * Fires at second 0 of every 5th minute and delegates the whole check to
     * {@code IComService.checkKafka()}.
     *
     * <p>Note: the original declaration also threw {@code ParseException},
     * which nothing in the body can raise; the unused checked exception has
     * been dropped (narrowing a throws clause is backward-compatible).
     *
     * @throws InterruptedException propagated from the restart wait inside checkKafka
     */
    @Scheduled(cron = "0 0/5 * * * ?")
    public void configureTasks() throws InterruptedException {
        comService.checkKafka();
    }
}
@Slf4j
@Service
public class comServiceImpl implements IComService {
    // Space-separated snapshot of the last known topic codes; empty until the
    // first successful start of the consumer jar.
    String queueStr = "";
    @Autowired
    ComMapper comMapper;

    @Override
    public List<Queue> getAllQueue() {
        return comMapper.getAllQueue();
    }

    /**
     * Compares the queues currently stored in the database with the cached
     * snapshot and (re)starts the standalone consumer jar when they differ.
     *
     * <p>Bug fix: the original code built the joined string BEFORE checking
     * {@code list.isEmpty()}, and the join did {@code substring(0, len - 1)},
     * which threw {@code StringIndexOutOfBoundsException} on an empty list.
     * The empty case now returns quietly, and the duplicated kill/restart
     * logic is factored into one helper.
     *
     * @throws InterruptedException if the wait between kill and restart is interrupted
     */
    @Override
    public void checkKafka() throws InterruptedException {
        List<Queue> list = getAllQueue();
        if (list.isEmpty()) {
            return; // nothing subscribed yet — nothing to start or compare
        }
        String str = listToStr(list);
        if (queueStr.length() == 0) {
            log.info("=========== 初次啓用kafka 進程 ==========");
            restartConsumerJar();
            queueStr = str;
        } else if (!queueStr.equals(str)) {
            log.info("========= topic訂閱改變,舊topic:" + queueStr + ",新topic:" + str + " =============");
            restartConsumerJar();
            queueStr = str;
        }
        // equal snapshot -> no-op
    }

    /**
     * Kills any running predata.jar process (if one is found in ps output)
     * and launches a fresh instance.
     *
     * @throws InterruptedException if the 5s grace period sleep is interrupted
     */
    private void restartConsumerJar() throws InterruptedException {
        List<String> predatalist = LinuxCommand.runShell("ps -ef|grep predata.jar");
        if (predatalist.toString().contains("java -jar")) {
            LinuxCommand.runShell("ps -ef | grep predata.jar | grep -v grep |  xargs kill -9");
        }
        Thread.sleep(5000); // give the killed process time to release resources
        String command = "nohup /u02/dyj/jdk1.8.0_251/bin/java -jar /u02/dyj/predata.jar  >>/u02/dyj/predata.log 2>&1 &";
        log.info("command:" + command);
        LinuxCommand.runShell(command);
    }

    /**
     * Joins the rep-branch codes with single spaces.
     * Safe for an empty list (returns ""), unlike the original substring trick.
     */
    private String listToStr(List<Queue> list) {
        StringBuilder sb = new StringBuilder();
        for (Queue queue : list) {
            if (sb.length() > 0) {
                sb.append(' ');
            }
            sb.append(queue.getRepBrchCode());
        }
        return sb.toString();
    }

}

kafka 單獨jar文件
主要就是啓動的時候去查詢數據庫,把最新的topic列表寫入系統屬性,然後交給監聽器消費

import com.yunda.predata.data.Queue;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.stereotype.Service;

import java.util.List;
@Configuration
@Slf4j
public class KafkaConfiguration implements InitializingBean {

    @Autowired
    comService comService;

    /**
     * At startup, queries the database for the subscribed queues and publishes
     * them as the comma-separated system property {@code topicName}, which the
     * {@code @KafkaListener} SpEL expression reads and splits.
     *
     * <p>Bug fix: the original {@code str.substring(0, str.length() - 1)}
     * threw {@code StringIndexOutOfBoundsException} when the query returned no
     * rows. We now build the list with a StringBuilder and fail fast with a
     * descriptive message when there is nothing to consume.
     */
    @Override
    public void afterPropertiesSet() throws Exception {
        List<Queue> logicTopicName = comService.someKindOfLocalLogic(); // DB lookup
        StringBuilder topics = new StringBuilder();
        for (Queue queue : logicTopicName) {
            if (topics.length() > 0) {
                topics.append(',');
            }
            topics.append(queue.getRepBrchCode()).append('_').append(queue.getQueueName());
        }
        if (topics.length() == 0) {
            // Starting a consumer with an empty topic list is never intended;
            // fail with a clear message instead of an index-out-of-bounds.
            throw new IllegalStateException("no kafka topics found in database; cannot start consumer");
        }
        String str = topics.toString();
        System.setProperty("topicName", str);
        log.info("#########  當前消費topic:{} ########", str);
    }
}

topicName是這種格式,類似於abc,bcd,fg

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ContainerProperties;

import java.util.HashMap;
import java.util.Map;

@Configuration
@EnableKafka
public class KafkaConsumerConfig {

    @Value("${kafka.consumer.servers}")
    private String servers;
    @Value("${kafka.consumer.enable.auto.commit}")
    private boolean enableAutoCommit;
    @Value("${kafka.consumer.session.timeout}")
    private String sessionTimeout;
    @Value("${kafka.consumer.auto.commit.interval}")
    private String autoCommitInterval;
    @Value("${kafka.consumer.group.id}")
    private String groupId;
    @Value("${kafka.consumer.auto.offset.reset}")
    private String autoOffsetReset;
    @Value("${kafka.consumer.concurrency}")
    private int concurrency;

    /**
     * Listener container factory: concurrency from config, manual offset
     * acknowledgement, 1.5s poll timeout.
     */
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        ContainerProperties containerProps = factory.getContainerProperties();
        containerProps.setAckMode(ContainerProperties.AckMode.MANUAL); // manual commit
        containerProps.setPollTimeout(1500);
        return factory;
    }

    /** Consumer factory backed by the property-driven config map below. */
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    /** Raw Kafka consumer properties assembled from the injected values. */
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> config = new HashMap<>();
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        config.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        config.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        config.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
        config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return config;
    }

    /** Registers the message listener as a Spring bean so @Autowired works inside it. */
    @Bean
    public Listener listener() {
        return new Listener();
    }

}
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;

@Slf4j
public class Listener {

    @Autowired
    comService comService;

    // NOTE(review): redundant with the @Slf4j-generated `log`, but kept —
    // it is protected and may be referenced by subclasses.
    protected final Logger logger = LoggerFactory.getLogger(this.getClass());

    /**
     * Consumes every topic listed in the comma-separated {@code topicName}
     * system property (set at startup by KafkaConfiguration) and delegates
     * persistence — including the manual ack — to comService.savePreData.
     *
     * <p>Bug fix: the original {@code record.value().toString()} threw an NPE
     * on null-value (tombstone) records; with MANUAL ack mode the record was
     * then never acknowledged and was redelivered forever. Null values are
     * now logged, acknowledged, and skipped.
     */
    @KafkaListener(topics = {"#{'${topicName}'.split(',')}"})
    public void listen(ConsumerRecord<?, ?> record, Acknowledgment ack) {
        log.info("============kafka開始消費===========");
        Object value = record.value();
        if (value == null) {
            log.warn("skipping null-value record, topic={} offset={}", record.topic(), record.offset());
            ack.acknowledge();
            return;
        }
        comService.savePreData(value.toString(), ack); // persists and acks
        log.info("============kafka消費結束===========");
    }

}

配置文件

kafka.consumer.zookeeper.connect=10.131.32.207:2181,10.131.32.208:2181,10.131.42.207:2181
kafka.consumer.servers=10.131.32.207:9092,10.131.32.208:9092,10.131.42.207:9092
kafka.consumer.enable.auto.commit=false
kafka.consumer.session.timeout=6000
kafka.consumer.auto.commit.interval=100
kafka.consumer.auto.offset.reset=earliest
kafka.consumer.group.id=test
kafka.consumer.concurrency=10
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章