canal同步數據到kafka配置及源碼

canal.properties 配置

instance.properties 配置

kafka的 server.properties 配置

canal客戶端代碼

import com.alibaba.fastjson.JSONObject;
import com.alibaba.otter.canal.client.CanalConnector;
import com.alibaba.otter.canal.client.CanalConnectors;
import com.alibaba.otter.canal.protocol.CanalEntry;
import com.alibaba.otter.canal.protocol.Message;
import com.example.demo.kafka.KafkaClient;
import org.springframework.stereotype.Component;

import java.net.InetSocketAddress;
import java.util.List;

@Component
public class CannalClient {
    private final static int BATCH_SIZE = 1000;
    public void canalService() {
        // 創建連接
        CanalConnector connector = CanalConnectors.newSingleConnector(new InetSocketAddress("localhost", 11111), "example", "", "");
        try {
            // 打開連接
            connector.connect();
            // 訂閱數據庫表,全部表
            connector.subscribe(".*\\..*");
            // 回滾到未進行 ack 的地方,下次 fetch 的時候,可以從最後一個沒有 ack 的地方開始拿
            connector.rollback();
            while (true) {
                // 獲取指定數量的數據
                Message message = connector.getWithoutAck(BATCH_SIZE);
                // 獲取批量 ID
                long batchId = message.getId();
                // 獲取批量的數量
                int size = message.getEntries().size();
                // 如果沒有數據
                if (batchId == -1 || size == 0) {
                    try {
                        // 線程休眠 2 秒
                        Thread.sleep(2000);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                } else {
                    List<CanalEntry.Entry> entries = message.getEntries();
                    for (CanalEntry.Entry entry : entries) {
                        if (entry.getEntryType().equals(CanalEntry.EntryType.ROWDATA)){
                            if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONBEGIN || entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONEND){
                                continue;
                            }
                            // 獲取 storevalue,並反序列化
                            CanalEntry.RowChange rowChage;
                            try{
                                rowChage = CanalEntry.RowChange.parseFrom(entry.getStoreValue());
                            }catch (Exception e){
                                throw new RuntimeException("ERROR ## parser of eromanga-event has an error , data:" + entry.toString(), e);
                            }
                            // 獲取當前entry是對哪個表的操作結果
                            String tableName = entry.getHeader().getTableName();
                            handle(tableName, rowChage);
                        }
                    }
                }
                connector.ack(batchId);
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            connector.disconnect();
        }
    }
    private static void handle(String tableName, CanalEntry.RowChange rowChange){
        String canaltopic = "canaltopic";
        String partition = "partition10";
        //獲取操作類型:insert/update/delete類型
        CanalEntry.EventType eventType = rowChange.getEventType();
        if (eventType.equals(CanalEntry.EventType.INSERT)){
            List<CanalEntry.RowData> rowDataList = rowChange.getRowDatasList();
            for (CanalEntry.RowData rowData : rowDataList){
                JSONObject jsonObject = new JSONObject();
                List<CanalEntry.Column> afterColumnList = rowData.getAfterColumnsList();
                for (CanalEntry.Column column : afterColumnList){
                    jsonObject.put(column.getName(), column.getValue());
                }
                // 將數據發送到 kafka
                KafkaClient.send(canaltopic, partition, jsonObject.toString());
            }
        }
    }
}

kafka 生產者代碼

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.springframework.stereotype.Component;
import java.util.*;

@Component
public class KafkaClient {
    private static Producer<String, String> producer;
    static {
        producer = createProducer();
    }
    //創建Producer
    public static Producer<String, String> createProducer(){
        //kafka連接的配置信息
        Properties properties = new Properties();
        properties.put("bootstrap.servers","localhost:9092");
        properties.put("key.serializer","org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer","org.apache.kafka.common.serialization.StringSerializer");
        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(properties);
        return kafkaProducer;
    }
    //將數據寫入到kafka
    public static void send(String topic, String partition, String value){
        producer.send(new ProducerRecord<>(topic, partition, value));
    }
}

kafka 消費者代碼

import java.time.Duration;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.springframework.stereotype.Component;

@Component
public class KafkaConsumerOps{
    private static Consumer<String, String> consumer;
    static {
        consumer = createConsumer();
    }
    //創建Producer
    public static Consumer<String, String> createConsumer() {
        //kafka連接的配置信息
        Properties properties = new Properties();
        properties.put("bootstrap.servers","localhost:9092");
        properties.put("group.id", "logGroup");
        properties.put("enable.auto.commit", "false");
        properties.put("session.timeout.ms", "30000");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        Consumer<String, String> consu = new KafkaConsumer<String, String>(properties);
        return consu;
    }
    // 數據從kafka中進行消費
    public static void consumerOps(){
        //消費者訂閱topic
        consumer.subscribe(Collections.singletonList("canaltopic"));   // 指定 topic 進行消費
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(300));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println("topic ==>> " + record.topic() + ", partition ==>> " + record.partition()
                            + ", offset ==>>" + record.offset() + ", key ==>>" + record.key() + ", value ==>>" + record.value());
                    try{
                        consumer.commitAsync();
                    }catch (Exception e){
                        e.printStackTrace();
                    }finally {
                        try {
                            consumer.commitSync();
                        }catch (Exception e){
                            e.printStackTrace();
                        }
                    }
                }
            }
        } finally {
            consumer.close();
        }
    }
}
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章