Spring Boot + Logback + Kafka + Logstash: shipping distributed logs to Elasticsearch

1. Add the Maven dependencies

 

        <!-- Kafka log appender -->
        <dependency>
            <groupId>com.github.danielwegener</groupId>
            <artifactId>logback-kafka-appender</artifactId>
            <version>0.2.0-RC1</version>
        </dependency>
        <dependency>
            <groupId>net.logstash.logback</groupId>
            <artifactId>logstash-logback-encoder</artifactId>
            <version>6.3</version>
        </dependency>

2. Write a ClassicConverter that resolves the host IP address

import ch.qos.logback.classic.pattern.ClassicConverter;
import ch.qos.logback.classic.spi.ILoggingEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.Enumeration;

/**
 *  @ClassName LogIpConfig
 *  @Author ShuYu Liu
 *  @Description Resolves the local IP address for the %ip conversion word
 *  @Date 2020/6/10 13:26
 */
public class LogIpConfig extends ClassicConverter {

    private static final Logger LOGGER = LoggerFactory.getLogger(LogIpConfig.class);

    @Override
    public String convert(ILoggingEvent event) {
        StringBuilder sb = new StringBuilder();
        try {
            // enumerate all local network interfaces
            Enumeration<NetworkInterface> en = NetworkInterface.getNetworkInterfaces();
            while (en.hasMoreElements()) {
                NetworkInterface ni = en.nextElement();
                Enumeration<InetAddress> enumInetAddr = ni.getInetAddresses();
                while (enumInetAddr.hasMoreElements()) {
                    InetAddress inetAddress = enumInetAddr.nextElement();
                    // keep only site-local addresses, skipping loopback and link-local ones
                    if (!inetAddress.isLoopbackAddress() && !inetAddress.isLinkLocalAddress()
                            && inetAddress.isSiteLocalAddress()) {
                        sb.append("name:").append(inetAddress.getHostName()).append("\n");
                        sb.append("ip:").append(inetAddress.getHostAddress()).append("\n");
                    }
                }
            }
        } catch (SocketException e) {
            LOGGER.error("Failed to read network interfaces", e);
        }
        return sb.toString();
    }
}
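Before wiring the converter into Logback, you can check what it will emit on a given machine. The class below is a hypothetical quick check, not part of the original setup, assumed to live in the same package as LogIpConfig; since convert() never touches the event argument, passing null is enough for a manual run.

public class LogIpConfigSmokeTest {
    public static void main(String[] args) {
        // convert() ignores the event, so null is fine for a standalone check
        System.out.println(new LogIpConfig().convert(null));
    }
}
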
3. Configure the Kafka parameters in bootstrap.yml
spring:
  test:
    kafka:
      bootstrap-servers: localhost:9092
      topic: logs-channel
      client-id: logs-${random.value}
      random-name: ${random.value}
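
It can be handy to confirm that these keys actually resolve from bootstrap.yml before debugging the appender itself. The runner below is a minimal sketch; the class name and wiring are my own and not part of the original post.

import org.springframework.boot.CommandLineRunner;
import org.springframework.core.env.Environment;
import org.springframework.stereotype.Component;

// Hypothetical helper: prints the resolved Kafka logging properties at startup
// so a misconfigured key is easy to spot.
@Component
public class KafkaLogConfigPrinter implements CommandLineRunner {

    private final Environment env;

    public KafkaLogConfigPrinter(Environment env) {
        this.env = env;
    }

    @Override
    public void run(String... args) {
        System.out.println("bootstrap-servers = " + env.getProperty("spring.test.kafka.bootstrap-servers"));
        System.out.println("topic             = " + env.getProperty("spring.test.kafka.topic"));
        System.out.println("client-id         = " + env.getProperty("spring.test.kafka.client-id"));
        System.out.println("random-name       = " + env.getProperty("spring.test.kafka.random-name"));
    }
}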

4. Add the following to the Logback configuration file

<conversionRule conversionWord="ip" converterClass="com.xxx.LogIpConfig" />
    <!-- test environment -->
    <springProfile name="test">
        <!-- configuration to be enabled when the "test" profile is active -->
        <springProperty scope="context" name="module" source="spring.application.name"
                        defaultValue="undefined"/>
        <!-- springProperty reads values from the Spring Environment; here we pick up the values defined in bootstrap.yml -->
        <springProperty scope="context" name="bootstrapServers" source="spring.test.kafka.bootstrap-servers"/>

        <springProperty scope="context" name="topic" source="spring.test.kafka.topic"/>

        <springProperty scope="context" name="clientId" source="spring.test.kafka.client-id"/>

        <springProperty scope="context" name="randomName" source="spring.test.kafka.random-name"/>

        <!-- kafka的appender配置 -->
        <appender name="kafka" class="com.github.danielwegener.logback.kafka.KafkaAppender">
            <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
                <!-- formatted output: %d date; %thread thread name; %level log level; %logger{50} logger name (up to 50 characters); %msg log message; %n newline -->
                <pattern>
                    host=%ip#@#timestamp=%d{yyyy-MM-dd HH:mm:ss.SSS}#@#randomName=${randomName}#@#thread=%thread#@#logLevel=%level#@#logger=%logger{50} - %msg%n
                </pattern>
            </encoder>
            <topic>${topic}</topic>
            <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy"/>
            <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>
            <producerConfig>bootstrap.servers=${bootstrapServers}</producerConfig>
            <producerConfig>acks=0</producerConfig>
            <!-- wait up to 1000ms and collect log messages before sending them as a batch -->
            <producerConfig>linger.ms=1000</producerConfig>
            <producerConfig>max.block.ms=0</producerConfig>
            <producerConfig>client.id=${clientId}</producerConfig>
        </appender>
        <logger name="com.xxx" level="info">
            <appender-ref ref="kafka" />
        </logger>
    </springProfile>
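
With the appender attached to the com.xxx logger, application code needs nothing Kafka-specific: any SLF4J logger under that package is shipped automatically. A minimal sketch follows; the controller and package names are illustrative, and it assumes spring-boot-starter-web is on the classpath.

package com.xxx.order;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class OrderController {

    private static final Logger LOGGER = LoggerFactory.getLogger(OrderController.class);

    @GetMapping("/order/ping")
    public String ping() {
        // Reaches Kafka formatted by the pattern above, roughly:
        // host=...#@#timestamp=...#@#randomName=...#@#thread=...#@#logLevel=INFO#@#logger=... - ping received
        LOGGER.info("ping received");
        return "ok";
    }
}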

5. Logstash configuration

input {

   kafka {
      id => "logstash"
      topics => ["logs-channel"]
      bootstrap_servers => ["localhost:9092"]
      codec => plain
      decorate_events => true
      type => "logs"
      consumer_threads => 2
   }

}

filter {
    kv {
        source => "message"
        field_split => "#@#"
    }
    mutate {
        remove_field => ["@version"]
        remove_field => ["@timestamp"]
        remove_field => ["message"]
    }
}

output {
    if [type] == "logs" {
        elasticsearch {
            hosts => "localhost:9200"
            index => "logs-%{+YYYY.MM.dd}"
            document_type => "logs"
            document_id => "%{[@metadata][kafka][topic]}-%{[@metadata][kafka][partition]}-%{[@metadata][kafka][offset]}"
        }
    }
}

With this in place, the log data ends up in Elasticsearch and can be queried there.
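
To confirm that documents are actually arriving, you can query today's index directly. A minimal sketch using the JDK 11+ HTTP client; the class name is mine, and the index name simply mirrors the logs-%{+YYYY.MM.dd} pattern from the Logstash output above.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;

// Hypothetical verification snippet, not part of the original post.
public class EsIndexCheck {
    public static void main(String[] args) throws Exception {
        // index name follows the "logs-%{+YYYY.MM.dd}" pattern used by Logstash
        String index = "logs-" + LocalDate.now().format(DateTimeFormatter.ofPattern("yyyy.MM.dd"));
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:9200/" + index + "/_search?size=1"))
                .GET()
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}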
