Kafka pseudo-dynamic topic consumption

PS: A business requirement called for the set of Kafka topics being consumed to be dynamic (i.e., it can change while the system is running). I searched around online without finding anything satisfactory, so I improvised a workaround of my own.
How it works:
1. The Kafka consumer module is packaged as a standalone jar.
2. A second system runs a scheduled job that queries the database every five minutes; when it detects that the topic list has changed, it runs Linux commands from Java to kill the Kafka jar's process and start it again.

The code is as follows:

import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

/**
 * Every five minutes, check the database for changes to the topic subscriptions.
 */
@Component
@Slf4j
public class ConsumerJob {
    @Autowired
    IComService comService;

    @Scheduled(cron = "0 0/5 * * * ?")
    public void configureTasks() throws InterruptedException {
        comService.checkKafka();
    }
}
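Note that @Scheduled only fires if scheduling is enabled in the application context. A minimal sketch of the scheduler side's main class, assuming a standard Spring Boot setup (the post does not show it):

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.scheduling.annotation.EnableScheduling;

@SpringBootApplication
@EnableScheduling // without this, the cron in ConsumerJob never runs
public class SchedulerApplication {
    public static void main(String[] args) {
        SpringApplication.run(SchedulerApplication.class, args);
    }
}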
@Slf4j
@Service
public class ComServiceImpl implements IComService {
    String queueStr = ""; // remembers the topic list from the previous run

    @Autowired
    ComMapper comMapper;

    @Override
    public List<Queue> getAllQueue() {
        return comMapper.getAllQueue();
    }

    @Override
    public void checkKafka() throws InterruptedException {
        List<Queue> list = getAllQueue();
        if (list.isEmpty()) {
            return; // no subscriptions yet, nothing to do
        }
        String str = listToStr(list);
        if (queueStr.length() == 0) {
            log.info("=========== starting the kafka process for the first time ==========");
            restartConsumer();
            queueStr = str;
        } else if (!queueStr.equals(str)) {
            log.info("========= topic subscriptions changed, old topics: " + queueStr + ", new topics: " + str + " =============");
            restartConsumer();
            queueStr = str;
        }
    }

    // Kill the running consumer jar (if one is found) and launch a fresh instance.
    private void restartConsumer() throws InterruptedException {
        List<String> predatalist = LinuxCommand.runShell("ps -ef|grep predata.jar");
        if (predatalist.toString().contains("java -jar")) {
            LinuxCommand.runShell("ps -ef | grep predata.jar | grep -v grep | xargs kill -9");
        }
        Thread.sleep(5000); // give the old process 5s to die
        String command = "nohup /u02/dyj/jdk1.8.0_251/bin/java -jar /u02/dyj/predata.jar >>/u02/dyj/predata.log 2>&1 &";
        log.info("command:" + command);
        LinuxCommand.runShell(command);
    }

    private String listToStr(List<Queue> list) {
        StringBuilder sb = new StringBuilder();
        for (Queue queue : list) {
            sb.append(queue.getRepBrchCode()).append(" ");
        }
        return sb.toString().trim();
    }
}
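The LinuxCommand helper is not included in the post. A minimal sketch of what its runShell could look like, assuming it hands the command to bash -c (so the pipes above are interpreted) and returns the lines of standard output:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

// Hypothetical implementation; the original helper may differ.
public class LinuxCommand {
    public static List<String> runShell(String command) throws InterruptedException {
        List<String> lines = new ArrayList<>();
        try {
            // bash -c is needed so shell syntax like | and & is interpreted
            Process process = Runtime.getRuntime().exec(new String[]{"/bin/bash", "-c", command});
            try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    lines.add(line);
                }
            }
            process.waitFor();
        } catch (java.io.IOException e) {
            throw new RuntimeException("shell command failed: " + command, e);
        }
        return lines;
    }
}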

The standalone Kafka jar
All it does is query the database at startup and then start consuming.

import com.yunda.predata.data.Queue;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;

import java.util.List;

@Configuration
@Slf4j
public class KafkaConfiguration implements InitializingBean {

    @Autowired
    comService comService;

    @Override
    public void afterPropertiesSet() throws Exception {
        List<Queue> logicTopicName = comService.someKindOfLocalLogic(); // query the database
        StringBuilder sb = new StringBuilder();
        for (Queue queue : logicTopicName) {
            sb.append(queue.getRepBrchCode()).append("_").append(queue.getQueueName()).append(",");
        }
        String str = sb.length() > 0 ? sb.substring(0, sb.length() - 1) : "";
        // Publish the topic list as a system property so the listener's SpEL expression can read it.
        System.setProperty("topicName", str);
        log.info("#########  topics currently consumed: {} ########", str);
    }
}

topicName ends up as a comma-separated string, something like abc,bcd,fg
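As a plain-Java sanity check of that format, splitting the property on "," yields exactly the topic array the listener later subscribes to (a toy illustration, not part of the original code):

public class TopicSplitDemo {
    public static void main(String[] args) {
        System.setProperty("topicName", "abc,bcd,fg");
        // The @KafkaListener SpEL expression does essentially this at container startup
        String[] topics = System.getProperty("topicName").split(",");
        for (String topic : topics) {
            System.out.println(topic); // prints abc, bcd, fg on separate lines
        }
    }
}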

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ContainerProperties;

import java.util.HashMap;
import java.util.Map;

@Configuration
@EnableKafka
public class KafkaConsumerConfig {

    @Value("${kafka.consumer.servers}")
    private String servers;
    @Value("${kafka.consumer.enable.auto.commit}")
    private boolean enableAutoCommit;
    @Value("${kafka.consumer.session.timeout}")
    private String sessionTimeout;
    @Value("${kafka.consumer.auto.commit.interval}")
    private String autoCommitInterval;
    @Value("${kafka.consumer.group.id}")
    private String groupId;
    @Value("${kafka.consumer.auto.offset.reset}")
    private String autoOffsetReset;
    @Value("${kafka.consumer.concurrency}")
    private int concurrency;
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL); // manual offset commit
        factory.getContainerProperties().setPollTimeout(1500);
        return factory;
    }

    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }


    public Map<String, Object> consumerConfigs() {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        return propsMap;
    }

    @Bean
    public Listener listener() {
        return new Listener();
    }

}
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;

@Slf4j
public class Listener {

    @Autowired
    comService comService;

    // The SpEL expression splits topicName on "," to produce the array of topics to subscribe to.
    @KafkaListener(topics = {"#{'${topicName}'.split(',')}"})
    public void listen(ConsumerRecord<?, ?> record, Acknowledgment ack) {
        log.info("============ kafka consumption started ===========");
        String recordValue = record.value().toString();
        comService.savePreData(recordValue, ack); // process the record and acknowledge manually
        log.info("============ kafka consumption finished ===========");
    }
}
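savePreData is not shown in the post either. A minimal sketch, assuming it persists the record and then commits the offset through the manual Acknowledgment (the names mirror the post; the real implementation may differ):

import lombok.extern.slf4j.Slf4j;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Service;

// Hypothetical outline of the consuming service.
@Slf4j
@Service
public class comService {
    public void savePreData(String recordValue, Acknowledgment ack) {
        // ... persist recordValue to the database here ...
        ack.acknowledge(); // commit the offset only after the record is safely handled
    }
}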

Configuration file

# zookeeper.connect is a leftover from the old consumer API; KafkaConsumerConfig only reads the bootstrap servers below
kafka.consumer.zookeeper.connect=10.131.32.207:2181,10.131.32.208:2181,10.131.42.207:2181
kafka.consumer.servers=10.131.32.207:9092,10.131.32.208:9092,10.131.42.207:9092
kafka.consumer.enable.auto.commit=false
kafka.consumer.session.timeout=6000
kafka.consumer.auto.commit.interval=100
kafka.consumer.auto.offset.reset=earliest
kafka.consumer.group.id=test
kafka.consumer.concurrency=10