Five ways for Flink to consume a Kafka topic from a fixed offset
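
FlinkKafkaConsumer011 offers five startup modes: a fixed timestamp (setStartFromTimestamp), explicit per-partition offsets (setStartFromSpecificOffsets), the earliest offset (setStartFromEarliest), the latest offset (setStartFromLatest), and the consumer group's committed offsets (setStartFromGroupOffsets). The listing below calls all five in sequence for demonstration; only the last call takes effect, so in a real job keep just the one you need. Note that these settings apply only on a fresh start: when restoring from a checkpoint or savepoint, Flink resumes from the offsets stored in state.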

package flinkdemo.sinkDemo;

import flinkdemo.sourceDemo.deserialization.ConsumerRecordKafkaDeserializationSchema;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;
import java.nio.charset.StandardCharsets;
import java.util.*;

import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
import org.apache.kafka.clients.consumer.ConsumerRecord;

/**
 * @author zhangkai
 * @create 2019/12/11
 */
public class SinkToKafka {
    public static void main(String[] args) throws Exception {
        String topicList = "otter_sms_0"; // comma-separated list of topics
        System.out.println(topicList);

        StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();

//        Optional checkpoint settings (uncomment to enable):
//        see.setStateBackend((StateBackend) new RocksDBStateBackend("hdfs://localhost:9000/user/zhangkai/flink-checkpoints"));

//        see.setStateBackend((StateBackend) new RocksDBStateBackend("hdfs:///user/zhangkai/flink-checkpoints"));
//        CheckpointConfig checkpointConfig = see.getCheckpointConfig();
//        checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
//        checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
//        checkpointConfig.setCheckpointInterval(30000);
//        checkpointConfig.setMaxConcurrentCheckpoints(3);
//        checkpointConfig.setCheckpointTimeout(60000);
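//        With checkpointing enabled, the consumer commits offsets back to Kafka when a
//        checkpoint completes (setCommitOffsetsOnCheckpoints, true by default); those
//        committed offsets are what setStartFromGroupOffsets() reads on the next start.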

        Properties props = new Properties();
        props.put("bootstrap.servers", "10.2.40.10:9092");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");  //key 反序列化
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");//value 反序列化
//        props.put("auto.offset.reset", "latest");
        props.put("group.id","GROUP_ID");


        FlinkKafkaConsumer011<ConsumerRecord> topicData = new FlinkKafkaConsumer011<>(Arrays.asList(topicList.split(",")), new ConsumerRecordKafkaDeserializationSchema(), props);
        // 1. Start from a specific timestamp (epoch milliseconds)
        topicData.setStartFromTimestamp(1577433868000L);

        // 2. Start from explicitly specified per-partition offsets
        /**
         * Map<KafkaTopicPartition, Long>
         * The KafkaTopicPartition constructor takes two arguments: the topic name
         * and the partition index (not the partition count).
         * The Long value is the offset to start from in that partition.
         * Current offsets can be obtained with Kafka's kafka-consumer-groups.sh
         * script (see the example after the listing).
         */
        Map<KafkaTopicPartition, Long> specificStartupOffsets = new HashMap<>();
        specificStartupOffsets.put(new KafkaTopicPartition("otter_sms_0",0),3387036l);
        specificStartupOffsets.put(new KafkaTopicPartition("otter_sms_0",1),3182960l);
        specificStartupOffsets.put(new KafkaTopicPartition("otter_sms_0",2),2815761l);
        specificStartupOffsets.put(new KafkaTopicPartition("otter_sms_0",3),3591033l);
        specificStartupOffsets.put(new KafkaTopicPartition("otter_sms_0",4),3346657l);
        topicData.setStartFromSpecificOffsets(specificStartupOffsets);
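        // Partitions missing from the map fall back to the group-offsets behaviour.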

        // 3. Start from the earliest message available in the topic
        topicData.setStartFromEarliest();

        // 4. Start from the latest message in the topic
        topicData.setStartFromLatest();

        // 5. Start from the offsets last committed by this consumer group
        //    (requires the "group.id" property set above)
        topicData.setStartFromGroupOffsets();


        DataStreamSource<ConsumerRecord> consumerRecordDataStreamSource = see.addSource(topicData);
        SingleOutputStreamOperator<String> map = consumerRecordDataStreamSource.map(new MapFunction<ConsumerRecord, String>() {
            @Override
            public String map(ConsumerRecord consumerRecord) throws Exception {
                // The pass-through schema delivers key and value as raw byte arrays.
                String key = new String((byte[]) consumerRecord.key(), StandardCharsets.UTF_8);
                String value = new String((byte[]) consumerRecord.value(), StandardCharsets.UTF_8);
                return consumerRecord.partition() + "_" + consumerRecord.timestamp() + "_" + key + "_" + consumerRecord.offset() + "_" + value;
            }
        }).setParallelism(1);
        map.print(); // print records to stdout so consumption is visible
        see.execute("kafka 2 kafka");
    }
}
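
The import at the top references a custom ConsumerRecordKafkaDeserializationSchema whose source is not shown in this post. Below is a minimal sketch of what it plausibly looks like, assuming Flink 1.8+'s KafkaDeserializationSchema interface and a simple pass-through of the raw record; the class name comes from the import above, but the body is an assumption, not the author's original code.

package flinkdemo.sourceDemo.deserialization;

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.streaming.connectors.kafka.KafkaDeserializationSchema;
import org.apache.kafka.clients.consumer.ConsumerRecord;

// Sketch (assumed implementation): hands the raw ConsumerRecord, with key and
// value still as byte[], straight to the downstream operators.
public class ConsumerRecordKafkaDeserializationSchema implements KafkaDeserializationSchema<ConsumerRecord> {
    @Override
    public boolean isEndOfStream(ConsumerRecord nextElement) {
        return false; // the Kafka stream is unbounded
    }

    @Override
    public ConsumerRecord deserialize(ConsumerRecord<byte[], byte[]> record) {
        return record; // pass the record through unchanged
    }

    @Override
    public TypeInformation<ConsumerRecord> getProducedType() {
        return TypeInformation.of(ConsumerRecord.class);
    }
}

The per-partition offsets used with setStartFromSpecificOffsets can be read from the CURRENT-OFFSET column of Kafka's consumer-group tool, for example:

kafka-consumer-groups.sh --bootstrap-server 10.2.40.10:9092 --describe --group GROUP_ID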