A SpringBoot example of sending Logback log output to Kafka

This article shows how to ship Logback log output to Kafka by creating a custom Appender in a SpringBoot application.

pom.xml

Configure the required Maven dependencies in pom.xml:

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.2.5.RELEASE</version>
        <relativePath/> <!-- lookup parent from repository -->
    </parent>
    <groupId>com.demo</groupId>
    <artifactId>log2kafka</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <name>log2kafka</name>
    <description>Demo project for send log to kafka</description>

    <properties>
        <java.version>1.8</java.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.60</version>
        </dependency>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
            <exclusions>
                <exclusion>
                    <groupId>org.junit.vintage</groupId>
                    <artifactId>junit-vintage-engine</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <!-- logback dependency -->
        <dependency>
            <groupId>ch.qos.logback</groupId>
            <artifactId>logback-classic</artifactId>
            <version>1.2.3</version>
        </dependency>
        <!-- kafka dependency -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
            <version>0.10.1.1</version>
            <exclusions>
                <exclusion>
                    <groupId>org.slf4j</groupId>
                    <artifactId>slf4j-log4j12</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>

</project>

It is not recommended to change the Kafka version in the pom file; doing so easily leads to all kinds of errors.

KafkaUtil.java

Create a Kafka utility class that configures and creates a Producer. Note that when a logback springProperty has neither a configured value nor a default, its placeholder resolves to a string containing IS_UNDEFINED, which is why the checks below fall back to default values in that case.

package com.demo.log2kafka.util;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;

import java.util.Properties;

public class KafkaUtil {

    public static Producer<String, String> createProducer(
            String bootstrapServers, String batchSize, String lingerMs,
            String compressionType, String retries, String maxRequestSize) {
        // When a configuration item resolves to ..._IS_UNDEFINED (no value and no default in logback), fall back to a default
        if (bootstrapServers == null || bootstrapServers.contains("IS_UNDEFINED")) {
            bootstrapServers = "localhost:9092";
        }
        if (batchSize.contains("IS_UNDEFINED")) {
            batchSize = "50000";
        }
        if (lingerMs.contains("IS_UNDEFINED")) {
            lingerMs = "60000";
        }

        if (retries.contains("IS_UNDEFINED")) {
            retries = "3";
        }
        if (maxRequestSize.contains("IS_UNDEFINED")) {
            maxRequestSize = "5242880";
        }

        Properties properties = new Properties();
        // Kafka broker address; for a cluster, separate the addresses with commas
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // Possible acks values:
        // 0: Kafka returns no acknowledgement; there is no guarantee the record was received, and retries never take effect because nothing is returned
        // 1: the partition leader acknowledges that the record was written to its log, but replication is not guaranteed (recommended value)
        // all: the leader waits until the record has been replicated before acknowledging
        properties.put(ProducerConfig.ACKS_CONFIG, "1");
        properties.put(ProducerConfig.RETRIES_CONFIG, Integer.valueOf(retries));
        // Batch sending: a send is triggered once the batch size limit is reached
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, Integer.valueOf(batchSize));
        // Even if batch.size has not been reached, records are still sent after this interval (in milliseconds)
        properties.put(ProducerConfig.LINGER_MS_CONFIG, Integer.valueOf(lingerMs));
        // Buffer memory for records waiting to be sent
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        if (!compressionType.contains("IS_UNDEFINED")) {
            // Compression algorithm
            properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, compressionType);
        }
        // Maximum size of a single request
        properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, Integer.valueOf(maxRequestSize));
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        return new KafkaProducer<String, String>(properties);
    }
}
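
The utility class can be tried out on its own before it is wired into the appender. The following is a minimal sketch, assuming a broker at localhost:9092 and a topic named test-topic (both placeholders); the class KafkaUtilDemo is not part of the project and only illustrates the call.

package com.demo.log2kafka.util;

import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class KafkaUtilDemo {

    public static void main(String[] args) {
        // Assumed broker address and topic; adjust to your environment
        Producer<String, String> producer = KafkaUtil.createProducer(
                "localhost:9092", "50000", "1000", "none", "3", "5242880");
        producer.send(new ProducerRecord<>("test-topic", "hello from KafkaUtil"));
        // close() blocks until pending records are delivered, so the message is flushed before the JVM exits
        producer.close();
    }
}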

KafkaAppender.java

package com.demo.log2kafka.appender;


import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.ConsoleAppender;
import com.demo.log2kafka.util.KafkaUtil;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.Marker;
import org.slf4j.MarkerFactory;

public class KafkaAppender extends ConsoleAppender<ILoggingEvent> {

    public static final Logger LOGGER = LoggerFactory.getLogger(KafkaAppender.class);

    // Marker used on the appender's own log statements so that append() can recognise and skip them
    private static final Marker KAFKA_MARKER = MarkerFactory.getMarker("KAFKA");

    private String bootstrapServers;
    private String topic;
    private String batchSize;
    private String lingerMs;
    private String compressionType;
    private String retries;
    private String maxRequestSize;
    private String isSend;

    private Producer<String, String> producer;

    @Override
    public String toString() {
        return "KafkaAppender{" +
                "bootstrapServers='" + bootstrapServers + '\'' +
                ", topic='" + topic + '\'' +
                ", batchSize='" + batchSize + '\'' +
                ", lingerMs='" + lingerMs + '\'' +
                ", compressionType='" + compressionType + '\'' +
                ", retries='" + retries + '\'' +
                ", maxRequestSize='" + maxRequestSize + '\'' +
                ", isSend='" + isSend + '\'' +
                ", producer=" + producer +
                '}';
    }

    @Override
    public void start() {
        super.start();
        if ("true".equals(this.isSend)) {
            if (producer == null) {
                producer = KafkaUtil.createProducer(this.bootstrapServers, this.batchSize,
                        this.lingerMs, this.compressionType, this.retries, this.maxRequestSize);
            }
        }
    }

    @Override
    public void stop() {
        super.stop();
        if ("true".equals(this.isSend)) {
            this.producer.close();
        }

        LOGGER.info(Markers.KAFKA, "Stopping kafkaAppender...");
    }

    @Override
    protected void append(ILoggingEvent eventObject) {
        byte[] byteArray;
        String log;
        // Encode the event with the configured encoder (pattern) into the final log line
        byteArray = this.encoder.encode(eventObject);
        log = new String(byteArray);
        ProducerRecord<String, String> record = new ProducerRecord<>(this.topic, log);
        // Only forward events without a marker; the appender's own statements carry KAFKA_MARKER and are skipped to avoid recursion
        if (eventObject.getMarker() == null && "true".equals(this.isSend)) {
            producer.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception != null) {
                        LOGGER.error(KAFKA_MARKER, "Send log to kafka failed: [{}]", log);
                    }
                }
            });
        }
    }

    public String getBootstrapServers() {
        return bootstrapServers;
    }

    public void setBootstrapServers(String bootstrapServers) {
        this.bootstrapServers = bootstrapServers;
    }

    public String getTopic() {
        return topic;
    }

    public void setTopic(String topic) {
        this.topic = topic;
    }

    public String getBatchSize() {
        return batchSize;
    }

    public void setBatchSize(String batchSize) {
        this.batchSize = batchSize;
    }

    public String getLingerMs() {
        return lingerMs;
    }

    public void setLingerMs(String lingerMs) {
        this.lingerMs = lingerMs;
    }

    public String getCompressionType() {
        return compressionType;
    }

    public void setCompressionType(String compressionType) {
        this.compressionType = compressionType;
    }

    public String getRetries() {
        return retries;
    }

    public void setRetries(String retries) {
        this.retries = retries;
    }

    public String getMaxRequestSize() {
        return maxRequestSize;
    }

    public void setMaxRequestSize(String maxRequestSize) {
        this.maxRequestSize = maxRequestSize;
    }

    public Producer<String, String> getProducer() {
        return producer;
    }

    public void setProducer(Producer<String, String> producer) {
        this.producer = producer;
    }

    public String getIsSend() {
        return isSend;
    }

    public void setIsSend(String isSend) {
        this.isSend = isSend;
    }
}

To send logs to Kafka in the configured format, the appender extends ConsoleAppender directly, so the encoder (pattern) configured in logback.xml can be reused as-is.
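
Note also the marker check in append(): only events without a marker are sent to Kafka, while the appender's own internal log statements (the send-failure report and the stop message) carry the KAFKA marker and are skipped, which keeps the appender from recursively shipping its own error logs. A minimal sketch of the difference follows; the class name is only illustrative.

package com.demo.log2kafka;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MarkerFactory;

public class MarkerDemo {

    private static final Logger LOGGER = LoggerFactory.getLogger(MarkerDemo.class);

    public void demo() {
        // No marker: KafkaAppender forwards this event to Kafka
        LOGGER.info("business log line, sent to kafka");
        // Any marker (here the same KAFKA marker the appender uses internally): KafkaAppender skips the event
        LOGGER.info(MarkerFactory.getMarker("KAFKA"), "internal log line, not sent to kafka");
    }
}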

logback.xml

<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <property name="LOG_HOME" value="./logs"/>
    <springProperty scope="context" name="springAppName"
                    source="spring.application.name"/>
    <!-- Read the Kafka settings from the application configuration -->
    <springProperty scope="context" name="isSend"
                    source="log.config.kafka.isSend" defaultValue="false"/>
    <springProperty scope="context" name="bootstrapServers"
                    source="log.config.kafka.bootstrapServers" defaultValue="localhost:9092"/>
    <springProperty scope="context" name="topic"
                    source="log.config.kafka.topic" defaultValue="test-topic"/>
    <springProperty scope="context" name="batchSize"
                    source="log.config.kafka.batchSize" defaultValue="1"/>
    <springProperty scope="context" name="lingerMs"
                    source="log.config.kafka.lingerMs" defaultValue="1000"/>
    <springProperty scope="context" name="compressionType"
                    source="log.config.kafka.compressionType" defaultValue="gzip"/>
    <springProperty scope="context" name="retries"
                    source="log.config.kafka.retries" defaultValue="3"/>
    <springProperty scope="context" name="maxRequestSize"
                    source="log.config.kafka.maxRequestSize" defaultValue="5242880"/>
    <!-- Adjust as needed -->
    <property name="APP_NAME" value="${springAppName}"/>
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>
                {
                "timestamp":"%date{yyyy-MM-dd HH:mm:ss.SSS}",
                "app": "${APP_NAME}",
                "logLevel": "%level",
                "message": "%message"
                }\n
            </pattern>
        </encoder>
    </appender>

    <appender name="KAFKA" class="com.demo.log2kafka.appender.KafkaAppender" >
        <!-- The encoder must be configured; it defines the log format -->
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>
{
"timestamp":"%date{yyyy-MM-dd HH:mm:ss.SSS}",
"app": "${APP_NAME}",
"logLevel": "%level",
"message": "%message"
}\n
            </pattern>
        </encoder>
        <bootstrapServers>${bootstrapServers}</bootstrapServers>
        <topic>${topic}</topic>
        <batchSize>${batchSize}</batchSize>
        <lingerMs>${lingerMs}</lingerMs>
        <compressionType>${compressionType}</compressionType>
        <retries>${retries}</retries>
        <maxRequestSize>${maxRequestSize}</maxRequestSize>
        <isSend>${isSend}</isSend>
    </appender>
    <!-- When the Kafka appender is used at DEBUG level, attach it to a dedicated logger like this rather than to root -->
    <logger name="com.demo.log2kafka" level="DEBUG">
        <appender-ref ref="KAFKA"/>
    </logger>
    <!-- Root log level -->
    <root  level="INFO">
        <!-- Console output -->
        <appender-ref ref="STDOUT"/>
    </root>

</configuration>

application.yml

spring:
  application:
    name: log2kafka
# Can be omitted entirely when logs are not sent to Kafka
log:
  config:
    kafka:
      # Whether to send logs to Kafka (true or false); required when the feature is used
      isSend: true
      # Kafka broker addresses; required when the feature is used
      bootstrapServers: 192.168.254.152:9092,192.168.254.156:9092
      # Topic the logs are sent to; required when the feature is used
      topic: test-topic
      # Batch size; a send is triggered once this number of records has accumulated
      batchSize: 5
      # Linger time in milliseconds; records are sent after this interval even if the batch size has not been reached
      lingerMs: 1000
#      # Compression type
#      compressionType: gzip
#      # Number of retries
#      retries: 3
#      # Maximum request size, here set to 5 MB
#      maxRequestSize: 5242880
server:
  port: 9090
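
With this configuration in place, any log written through SLF4J under the com.demo.log2kafka package at DEBUG level or above is forwarded to the configured topic. Below is a minimal sketch of a startup class that produces such logs; the class name Log2kafkaApplication is assumed and the log messages are only examples.

package com.demo.log2kafka;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
public class Log2kafkaApplication {

    private static final Logger LOGGER = LoggerFactory.getLogger(Log2kafkaApplication.class);

    public static void main(String[] args) {
        SpringApplication.run(Log2kafkaApplication.class, args);
        // These events carry no marker, so KafkaAppender forwards them to the configured topic
        LOGGER.info("application started, this line should appear in kafka");
        LOGGER.debug("debug details are also sent because the logger level is DEBUG");
    }
}

Delivery can then be checked with Kafka's console consumer, for example: kafka-console-consumer.sh --bootstrap-server 192.168.254.152:9092 --topic test-topic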