Manually creating a Kafka topic in Spring Boot, specifying the partition and replica counts

Dependencies:

       <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>

        

        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>

 

application.properties:

### Kafka configuration
spring.kafka.bootstrap-servers=10.160.3.70:9092
spring.kafka.consumer.group-id=sea-test
spring.kafka.consumer.enable-auto-commit=false
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.max-poll-records=2000
#spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
#spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.producer.retries=3
spring.kafka.producer.batch-size=16384
spring.kafka.producer.buffer-memory=33554432
# note: spring.kafka.producer.linger is not a standard Spring Boot key; it is bound via @Value in KafkaConfig below
spring.kafka.producer.linger=10
#spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
#spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer


KafkaConfig:

package com.icil.topic.config;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaAdmin;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.ContainerProperties;

import com.google.common.collect.Maps;

@Configuration
@EnableKafka
public class KafkaConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${spring.kafka.consumer.group-id}")
    private String groupId;

    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private Boolean autoCommit;

    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;

    @Value("${spring.kafka.consumer.max-poll-records}")
    private Integer maxPollRecords;
    
    @Value("${spring.kafka.producer.linger}")
    private int linger;

    @Value("${spring.kafka.producer.retries}")
    private Integer retries;

    @Value("${spring.kafka.producer.batch-size}")
    private Integer batchSize;

    @Value("${spring.kafka.producer.buffer-memory}")
    private Integer bufferMemory;


    // Reference: https://blog.csdn.net/tmeng521/article/details/90901925
    public Map<String, Object> producerConfigs() {
         
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // number of retries on a failed send
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        // a batch is sent once it reaches batchSize bytes
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        // linger time in ms: the batch is sent when this elapses, even if batchSize has not been reached
        props.put(ProducerConfig.LINGER_MS_CONFIG, linger);
        // total memory available to the producer's send buffer
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        // serializers
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // producer acknowledgment: -1 (equivalent to "all") means the message must be written to the leader and to all in-sync replicas
        props.put(ProducerConfig.ACKS_CONFIG, "-1"); // with a single broker, "1" is recommended
        // maximum size of a single request in bytes (default 1048576)
        props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 10485760);
        // broker response timeout: if the broker does not acknowledge within 60 seconds, the send is considered failed
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 60000);
        // interceptors (value is the interceptor class name)
        //props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, "com.te.handler.KafkaProducerInterceptor");
        // compression algorithm (none by default)
        props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
        return props;
    }
    
    
    
    @Bean // Kafka admin bean, analogous to RabbitMQ's rabbitAdmin; without it, AdminClient cannot be used to create topics programmatically
    public KafkaAdmin kafkaAdmin() {
        Map<String, Object> props = new HashMap<>();
        // connection address of the Kafka brokers (not ZooKeeper)
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        KafkaAdmin admin = new KafkaAdmin(props);
        return admin;
    }

    @Bean // Kafka admin client; once this bean exists it can be injected to create topics, including multi-replica topics in a cluster
    public AdminClient adminClient() {
        return AdminClient.create(kafkaAdmin().getConfig());
    }

    

    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    
    
    
    
    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = Maps.newHashMap();
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
//        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 180000);
//        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 900000);
//        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 900000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }


    @Bean
    public KafkaListenerContainerFactory<?> batchFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
        // enable batch consumption; the per-poll batch size is capped by ConsumerConfig.MAX_POLL_RECORDS_CONFIG
        factory.setBatchListener(true);
        // set the retry template
//        factory.setRetryTemplate(retryTemplate());
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
        return factory;
    }


}
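
Because batchFactory enables batch consumption with AckMode.MANUAL, the matching listener must receive each poll as a List and acknowledge it explicitly. A minimal listener sketch (the package, class, and topic name topic.manual.create are illustrative, not from the original project):

package com.icil.topic.consumer;

import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class BatchConsumer {

    // receives up to max-poll-records messages per batch from the batchFactory container
    @KafkaListener(topics = "topic.manual.create", containerFactory = "batchFactory")
    public void onBatch(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        for (ConsumerRecord<String, String> record : records) {
            System.out.println("partition " + record.partition() + ": " + record.value());
        }
        // AckMode.MANUAL: commit the whole batch only after it has been processed
        ack.acknowledge();
    }
}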


If a topic needs to be created at application startup, it can be declared as a NewTopic bean (reference: https://blog.csdn.net/tmeng521/article/details/90901925). Note that if KafkaConfig above already defines the kafkaAdmin and adminClient beans, the duplicates below should be dropped to avoid a bean-definition conflict:

 

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.KafkaAdmin;

@Configuration
public class KafkaInitialConfiguration {

    // create a topic named topic.quick.initial with 8 partitions and 1 replica
    @Bean // declared as a bean (bean name: initialTopic)
    public NewTopic initialTopic() {
        return new NewTopic("topic.quick.initial", 8, (short) 1);
    }

    /**
     * With this @Bean approach, a NewTopic whose name matches an existing topic
     * overrides the earlier definition.
     * @return
     */
    // after this override the partition count becomes 11; note that the partition
    // count can only be increased, never decreased
    @Bean
    public NewTopic initialTopic2() {
        return new NewTopic("topic.quick.initial", 11, (short) 1);
    }

    @Bean // Kafka admin bean, analogous to RabbitMQ's rabbitAdmin; without it, AdminClient cannot be used to create topics programmatically
    public KafkaAdmin kafkaAdmin() {
        Map<String, Object> props = new HashMap<>();
        // connection address of the Kafka brokers (not ZooKeeper)
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
        KafkaAdmin admin = new KafkaAdmin(props);
        return admin;
    }

    @Bean // Kafka admin client; once this bean exists it can be injected to create topics
    public AdminClient adminClient() {
        return AdminClient.create(kafkaAdmin().getConfig());
    }

}
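
Because the partition count can only grow, an existing topic can also be expanded at runtime through the AdminClient instead of redeclaring the NewTopic bean. A minimal sketch, assuming the adminClient bean above (NewPartitions is org.apache.kafka.clients.admin.NewPartitions; the method name is illustrative):

    // expand topic.quick.initial to 11 partitions; the call fails if the requested
    // count is lower than the topic's current partition count
    public void increasePartitions(AdminClient adminClient) throws Exception {
        Map<String, NewPartitions> request =
                Collections.singletonMap("topic.quick.initial", NewPartitions.increaseTo(11));
        adminClient.createPartitions(request).all().get();
    }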

 

Test: manually create a topic, and list all topics:

    @Autowired // requires the adminClient bean configured above
    private AdminClient adminClient;

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    @Test // manually create a topic with a custom partition count
    public void testCreateTopic() throws InterruptedException {
        // manual creation: 10 partitions, 1 replica
        // more partitions allow more consumer parallelism, but the count should match the machine's capacity
        NewTopic topic = new NewTopic("topic.manual.create", 10, (short) 1);
        adminClient.createTopics(Arrays.asList(topic));
        Thread.sleep(1000);
    }
    
    
    /**
     * List all topics.
     * @throws Exception
     */
    @Test
    public void getAllTopic() throws Exception {
        ListTopicsResult listTopics = adminClient.listTopics();
        Set<String> topics = listTopics.names().get();

        for (String topic : topics) {
            System.err.println(topic);
        }
    }
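
To verify that the partition and replica counts actually took effect, the same AdminClient can describe the topic; the injected kafkaTemplate can also push a test message to an explicit partition. A minimal sketch (topic.manual.create comes from the test above; TopicDescription and TopicPartitionInfo live in org.apache.kafka.clients.admin and org.apache.kafka.common):

    @Test // describe the topic and print each partition's replica count
    public void describeTopic() throws Exception {
        TopicDescription description = adminClient
                .describeTopics(Arrays.asList("topic.manual.create"))
                .all().get()
                .get("topic.manual.create");
        for (TopicPartitionInfo partition : description.partitions()) {
            System.err.println("partition " + partition.partition()
                    + ", replicas: " + partition.replicas().size());
        }
    }

    @Test // send a test message to an explicit partition (partition 0; the key is illustrative)
    public void testSendToPartition() {
        kafkaTemplate.send("topic.manual.create", 0, "key-0", "hello partition 0");
        kafkaTemplate.flush();
    }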

 
