Kafka API and Kafka Streams API

pom.xml

    <dependencies>
        <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>1.0.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-streams</artifactId>
            <version>1.0.0</version>
        </dependency>

    </dependencies>

    <build>
        <plugins>
            <!-- Java compiler plugin -->
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.2</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <encoding>UTF-8</encoding>
                </configuration>
            </plugin>
        </plugins>
    </build>

Producer code

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

/**
 * @author dell
 * @version v 1.0
 * @date 2020.3.30
 */
public class OrderProducer {
    public static void main(String[] args) {
        //1. Configure the Kafka cluster connection
        Properties p = new Properties();
        //Kafka broker addresses
        p.put("bootstrap.servers", "192.168.100.100:9092,192.168.100.101:9092,192.168.100.102:9092");
        //Acknowledgment mechanism: "all" waits until the full ISR has confirmed the write
        p.put("acks", "all");
        //Retry count
        p.put("retries", 0);
        //Batch size in bytes
        p.put("batch.size", 16384);
        //How long to wait before sending a batch, in milliseconds
        p.put("linger.ms", 1);
        //Total buffer memory for batching, in bytes
        p.put("buffer.memory", 33554432);
        //Serializers for the Kafka key and value
        p.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        p.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        //todo Register the custom partitioner by its fully qualified name (package.ClassName)
        p.put("partitioner.class", "KafkaCustomPartitioner");

        //2. Instantiate a producer
        KafkaProducer<String, String> producer = new KafkaProducer<>(p);

        //3. Send data to the Kafka cluster through the producer
        for (int i = 0; i < 10; i++) {
            //1st argument: the topic (required)
            //2nd argument: the partition (optional). Specifying a partition also requires a key;
            //              all records then go to that partition and the key is effectively ignored.
            //3rd argument: the key (optional). If only a key is given, the key is hashed and taken
            //              modulo the partition count to pick the partition.
            //4th argument: the value to send (required)
            //ProducerRecord<String, String> record = new ProducerRecord<>("18BD12_2", 0, "test", "sent data: " + i);
            ProducerRecord<String, String> record = new ProducerRecord<>("18BD12_2", "sent data: " + i);
            //Send the record
            producer.send(record);
        }
        //4. Close the connection
        producer.close();
    }
}
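
producer.send() above is fire-and-forget: with acks set to "all", delivery failures are reported to the client, but only through the returned Future or a callback, neither of which this example checks. A minimal sketch of a send with a callback, reusing the record and producer from the loop above (Callback is a functional interface, so a lambda works on Java 8):

producer.send(record, (metadata, exception) -> {
    if (exception != null) {
        //The write was rejected or timed out
        exception.printStackTrace();
    } else {
        //On success the broker reports where the record landed
        System.out.println("partition = " + metadata.partition() + ", offset = " + metadata.offset());
    }
});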

Custom partitioner code

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;

import java.util.List;
import java.util.Map;
import java.util.Random;

/**
 * @author dell
 */
public class KafkaCustomPartitioner implements Partitioner {
	//Reuse a single Random instance instead of allocating one per record
	private final Random random = new Random();

	@Override
	public void configure(Map<String, ?> configs) {
	}

	@Override
	public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
		//Get the partitions of the topic
		List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
		//Get the number of partitions
		int partitionNum = partitions.size();
		//Pick a random partition
		return random.nextInt(partitionNum);
	}

	@Override
	public void close() {
	}
}
}
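
The partitioner above ignores the key entirely and spreads records at random. For comparison, here is a minimal sketch of the hash-the-key behavior described in the producer comments: hash the key bytes and take the result modulo the partition count. The class name KeyHashPartitioner is made up for illustration; it uses kafka-clients' own Utils.murmur2 helper and does not handle null keys, which the built-in DefaultPartitioner does.

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.utils.Utils;

import java.util.Map;

public class KeyHashPartitioner implements Partitioner {
	@Override
	public void configure(Map<String, ?> configs) {
	}

	@Override
	public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
		//Number of partitions for this topic
		int partitionNum = cluster.partitionsForTopic(topic).size();
		//murmur2 may return a negative int; toPositive masks off the sign bit
		return Utils.toPositive(Utils.murmur2(keyBytes)) % partitionNum;
	}

	@Override
	public void close() {
	}
}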

Consumer code, pulling records by partition

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.Set;

/**
 * @author dell
 * @version v 1.0
 * @date 2020.3.30
 */
public class ConsumerPartitioner {
    public static void main(String[] args) {
        //1. Add the configuration
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.100.100:9092,192.168.100.101:9092,192.168.100.102:9092");
        props.put("group.id", "test");

        //todo --- commit offsets manually from the consumer (recommended)
        props.put("enable.auto.commit", "false");

        //Deserializers for the Kafka key and value
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        //2. Instantiate a consumer
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        //3. Subscribe to the topic to read from
        consumer.subscribe(Collections.singletonList("18BD12_2"));

        //4. Pull data and print it
        //Poll in an endless loop
        while (true) {
            //Fetch a batch of records (1000 ms timeout)
            ConsumerRecords<String, String> records = consumer.poll(1000);
            //Get all partitions present in this batch
            Set<TopicPartition> partitions = records.partitions();
            //Iterate over the partitions
            for (TopicPartition partition : partitions) {
                //Get all records of one partition
                List<ConsumerRecord<String, String>> list = records.records(partition);
                //Iterate over the records one by one
                for (ConsumerRecord<String, String> record : list) {
                    //Offset of the record
                    long offset = record.offset();
                    //Value of the record
                    String value = record.value();
                    //Print the partition
                    System.out.println("partition = " + partition);
                    //Print the offset
                    System.out.println("offset = " + offset);
                    //Print the value
                    System.out.println("value = " + value);
                }
                //Commit offsets; note the no-argument commitAsync() commits the positions
                //of all partitions fetched by the last poll, not only this one
                consumer.commitAsync();
            }
        }
    }
}
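
To commit one partition's progress at a time instead, the commitSync overload that takes an offset map can be used; a minimal sketch of that pattern, placed inside the per-partition loop after its records are processed. Note the committed value is lastOffset + 1, because the committed offset is the position of the next record to read:

import org.apache.kafka.clients.consumer.OffsetAndMetadata;

//After processing this partition's records:
long lastOffset = list.get(list.size() - 1).offset();
//Commit only this partition; the committed offset is the next one to consume
consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));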

Kafka Streams API

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;


import java.util.Properties;

/**
 * @author dell
 * Reads data from the test01 topic, converts it to uppercase, and writes it to test02
 */
public class KafkaStream {

    public static void main(String[] args) {
        Properties props = new Properties();
        //Unique id of this streams application
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-application12");
        //Kafka cluster addresses
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop01:9092,hadoop02:9092,hadoop03:9092");
        //Default serdes (serializer/deserializer pairs) for keys and values
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        //Build the processing logic
        StreamsBuilder streamsBuilder = new StreamsBuilder();
        //stream(...) names the topic to read from, to(...) the topic to write to
        streamsBuilder.stream("test01").mapValues(line -> line.toString().toUpperCase()).to("test02");

        //Build the Topology (the processing graph)
        final Topology topology = streamsBuilder.build();

        //Instantiate the Kafka Streams application
        KafkaStreams streams = new KafkaStreams(topology, props);
        //Start the stream processing
        streams.start();
    }
}
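
The example starts the topology but never stops it, so streams.close() is never called and the application can only be killed. A common pattern (a sketch, not part of the original example) is to register a JVM shutdown hook right after streams.start():

//Close the streams application cleanly on Ctrl+C / JVM shutdown
Runtime.getRuntime().addShutdownHook(new Thread(streams::close));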
