ELK Setup
https://blog.csdn.net/qq_37598011/article/details/103260806
Configuration changes
First, go to the Logstash config directory:
cd /application/logstash/config/
Create a new logstash-test.conf configuration:
vim logstash-test.conf
input {
  kafka {
    id => "my_plugin_id"
    bootstrap_servers => "192.168.78.133:9092"
    topics => ["logger-platform", "logger-user", "logger-gateway"]
    # codec => plain
    codec => json_lines
    auto_offset_reset => "latest"
  }
  # stdin { }
}

#filter {
#  grok {
#    patterns_dir => ["./patterns"]
#    match => { "message" => "%{WORD:module} \| %{LOGBACKTIME:timestamp} \| %{LOGLEVEL:level} \| %{JAVACLASS:class} - %{JAVALOGMESSAGE:logmessage}" }
#  }
#}

output {
  elasticsearch {
    # action => "index"
    hosts => "127.0.0.1:9200"
    index => "logger-%{+YYYY-MM}"
  }
  # stdout { codec => rubydebug }
}
Restart Logstash:
nohup ../bin/logstash -f ./logstash-test.conf >/dev/null 2>&1 &
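Before backgrounding it, you can ask Logstash to validate the pipeline file first; a quick sanity check (the flag is available on Logstash 5.x and later):
../bin/logstash -f ./logstash-test.conf --config.test_and_exit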
Spring Boot changes
Add the pom dependencies:
<!-- Kafka dependency -->
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>2.2.0.RELEASE</version>
</dependency>
<!-- logback-kafka-appender dependency -->
<dependency>
    <groupId>com.github.danielwegener</groupId>
    <artifactId>logback-kafka-appender</artifactId>
    <version>0.2.0-RC1</version>
    <scope>runtime</scope>
</dependency>
<!-- encode log output as JSON for the ELK stack -->
<!-- logback -->
<dependency>
    <groupId>ch.qos.logback</groupId>
    <artifactId>logback-core</artifactId>
</dependency>
<dependency>
    <groupId>ch.qos.logback</groupId>
    <artifactId>logback-classic</artifactId>
    <version>1.2.3</version>
    <scope>runtime</scope>
</dependency>
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>5.1</version>
</dependency>
<!-- distributed tracing (Sleuth) -->
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-sleuth</artifactId>
</dependency>
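Note that spring-cloud-starter-sleuth is declared without a version, which assumes a Spring Cloud BOM is already imported in dependencyManagement; a minimal sketch (Greenwich.SR6 is just an example release train, pick the one matching your Boot version):
<dependencyManagement>
    <dependencies>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-dependencies</artifactId>
            <version>Greenwich.SR6</version>
            <type>pom</type>
            <scope>import</scope>
        </dependency>
    </dependencies>
</dependencyManagement>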
Logback file changes (use logback-spring.xml so that <springProperty> can read values from the Spring Environment):
<include resource="org/springframework/boot/logging/logback/defaults.xml"/>
<!-- These elements read values from the Spring Environment, here from application.yml -->
<springProperty scope="context" name="bootstrapServers" source="spring.kafka.bootstrap-servers" defaultValue="localhost:9092"/>
<springProperty scope="context" name="server-name" source="spring.application.name"/>
<!-- on older Spring Cloud releases (pre-Edgware) the key is spring.cloud.client.ipAddress -->
<springProperty scope="context" name="ip" source="spring.cloud.client.ip-address"/>
<!-- Kafka appender configuration -->
<appender name="kafkaAppender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
    <encoder charset="UTF-8" class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
        <providers>
            <timestamp>
                <timeZone>UTC</timeZone>
            </timestamp>
            <pattern>
                <pattern>
                    {
                    "timestamp": "%d{yyyy-MM-dd'T'HH:mm:ss.SSSZ}",
                    "traceId": "%X{X-B3-TraceId:-}",
                    "parentSpanId": "%X{X-B3-ParentSpanId:-}",
                    "spanId": "%X{X-B3-SpanId:-}",
                    "exportable": "%X{X-Span-Export:-}",
                    "pid": "${PID:-}",
                    "logger": "%logger",
                    "level": "%p",
                    "message": "%msg",
"host": "%property",
"serverName": "${server-name}",
"stackTrace": "%ex"
}
</pattern>
</pattern>
</providers>
</encoder>
<topic>logger-platform</topic>
<!-- we don't care how the log messages will be partitioned -->
<keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy" />
<!-- use async delivery. the application threads are not blocked by logging -->
<deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />
<!-- each <producerConfig> translates to regular kafka-client config (format: key=value) -->
<!-- producer configs are documented here: https://kafka.apache.org/documentation.html#newproducerconfigs -->
<!-- bootstrap.servers is the only mandatory producerConfig -->
<producerConfig>bootstrap.servers=${bootstrapServers}</producerConfig>
<!-- don't wait for a broker to ack the reception of a batch. -->
<producerConfig>acks=0</producerConfig>
<!-- wait up to 1000ms and collect log messages before sending them as a batch -->
<producerConfig>linger.ms=1000</producerConfig>
<!-- even if the producer buffer runs full, do not block the application but start to drop messages -->
<producerConfig>max.block.ms=0</producerConfig>
<!-- define a client-id that you use to identify yourself against the kafka broker -->
<producerConfig>client.id=${HOSTNAME}-${CONTEXT_NAME}-logback-relaxed</producerConfig>
</appender>
<!-- Log output level -->
<root level="INFO">
    <appender-ref ref="kafkaAppender"/>
</root>
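Optionally, logback-kafka-appender supports fallback appenders: any <appender-ref> declared inside the KafkaAppender receives events that could not be delivered to Kafka, so logs are not silently dropped when the broker is down. A sketch with a console fallback:
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
    <encoder>
        <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
    </encoder>
</appender>
<!-- then, inside the kafkaAppender definition above: -->
<!-- <appender-ref ref="STDOUT"/> -->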
Configure application.yml:
spring:
  kafka:
    bootstrap-servers: 192.168.78.133:9092
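The logback file also reads spring.application.name (for the serverName field), so the yml should define that as well; for example (the name logger-platform is just a placeholder):
spring:
  application:
    name: logger-platform
  kafka:
    bootstrap-servers: 192.168.78.133:9092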
The Slf4jUtils class (note: it relies on sun.misc.SharedSecrets, which exists on Java 8 but was removed from later JDKs):
import org.slf4j.LoggerFactory;
import org.slf4j.spi.LocationAwareLogger;
import org.springframework.stereotype.Component;
import sun.misc.JavaLangAccess;
import sun.misc.SharedSecrets;

import java.io.PrintWriter;
import java.io.StringWriter;
import java.text.MessageFormat;

/**
 * @Classname Slf4jUtils
 * @Description SLF4J wrapper that names the logger after the calling class, method and line number
 * @Date 2019/11/29 5:58
 * @Created zzf
 */
@Component
public class Slf4jUtils {

    /**
     * Empty argument array
     */
    private static final Object[] EMPTY_ARRAY = new Object[]{};

    /**
     * Fully qualified class name of this wrapper
     */
    private static final String FQCN = Slf4jUtils.class.getName();

    /**
     * Get the caller's frame from the stack
     *
     * @param stackDepth depth of the calling frame
     * @return a logger named "<class>-<method>-<line>"
     */
    private LocationAwareLogger getLocationAwareLogger(final int stackDepth) {
        /* Use the stack trace to find the class and method that invoked the logging call */
        JavaLangAccess access = SharedSecrets.getJavaLangAccess();
        Throwable throwable = new Throwable();
        StackTraceElement frame = access.getStackTraceElement(throwable, stackDepth);
        return (LocationAwareLogger) LoggerFactory.getLogger(frame.getClassName() + "-" +
                frame.getMethodName().split("\\$")[0] + "-" +
                frame.getLineNumber());
    }

    /**
     * Debug-level logging
     *
     * @param msg       message, optionally with MessageFormat placeholders ({0}, {1}, ...)
     * @param arguments values substituted into the placeholders
     */
    public void debug(String msg, Object... arguments) {
        if (arguments != null && arguments.length > 0) {
            msg = new MessageFormat(msg).format(arguments);
        }
        getLocationAwareLogger(2).log(null, FQCN, LocationAwareLogger.DEBUG_INT, msg, EMPTY_ARRAY, null);
    }

    /**
     * Info-level logging
     */
    public void info(String msg, Object... arguments) {
        if (arguments != null && arguments.length > 0) {
            msg = new MessageFormat(msg).format(arguments);
        }
        getLocationAwareLogger(2).log(null, FQCN, LocationAwareLogger.INFO_INT, msg, EMPTY_ARRAY, null);
    }

    /**
     * Warn-level logging
     */
    public void warn(String msg, Object... arguments) {
        if (arguments != null && arguments.length > 0) {
            msg = new MessageFormat(msg).format(arguments);
        }
        getLocationAwareLogger(2).log(null, FQCN, LocationAwareLogger.WARN_INT, msg, EMPTY_ARRAY, null);
    }

    /**
     * Error-level logging
     */
    public void error(String msg, Object... arguments) {
        if (arguments != null && arguments.length > 0) {
            msg = new MessageFormat(msg).format(arguments);
        }
        getLocationAwareLogger(2).log(null, FQCN, LocationAwareLogger.ERROR_INT, msg, EMPTY_ARRAY, null);
    }

    /**
     * Convert an exception stack trace to a string
     *
     * @param e the exception, may be null
     * @return the stack trace as a string
     */
    public String exceptionToString(Exception e) {
        if (e == null) {
            return "no exception details available";
        }
        StringWriter sw = new StringWriter();
        PrintWriter pw = new PrintWriter(sw);
        try {
            e.printStackTrace(pw);
            return sw.toString();
        } catch (Exception ex) {
            this.error("failed to convert exception stack trace to string", ex);
            return "";
        } finally {
            // release the writer
            pw.close();
        }
    }
}
Usage
Use it exactly the same way as a regular logger, for example:
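A minimal sketch (the controller and endpoint are illustrative only); note that the placeholders are MessageFormat style ({0}, {1}, ...), not SLF4J's {}:
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

import javax.annotation.Resource;

@RestController
public class DemoController {

    @Resource
    private Slf4jUtils log;

    @GetMapping("/hello")
    public String hello(String name) {
        // the logger name carries the calling class, method and line number
        log.info("hello endpoint called with name={0}", name);
        return "hello " + name;
    }
}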
Start the Boot application
Configure Kibana
In Kibana, create an index pattern matching the logger-* indices.
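To confirm that Logstash has actually created the monthly indices before setting up the pattern (Elasticsearch on 127.0.0.1:9200, as configured above), a quick check:
curl 'http://127.0.0.1:9200/_cat/indices?v' | grep logger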
Distributed tracing!
OK! The logs are now being shipped through Kafka, and the trace IDs show that distributed tracing works!
Log collection through an AOP aspect
Example:
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.ils.intelab.common.slf4j.Slf4jUtils;
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Pointcut;
import org.slf4j.MDC;
import org.springframework.stereotype.Component;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;

import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.util.ArrayList;
import java.util.List;

/**
 * @Classname LogAspect
 * @Description Collects request URL, client IP, method and arguments into the MDC and logs the result
 * @Date 2019/12/2 15:19
 * @Created zzf
 */
@Aspect
@Component
public class LogAspect {

    @Resource
    Slf4jUtils log;

    /**
     * Intercept all controller methods
     */
    @Pointcut("execution(public * com.CC.CCC.CCCCC.controller..*.*(..))")
    public void webLog() {
    }

    @Around("webLog()")
    public Object aroundMethod(ProceedingJoinPoint joinPoint) throws Throwable {
        /* result holds the join point's return value */
        Object result = null;
        ServletRequestAttributes attributes = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
        HttpServletRequest request = attributes.getRequest();
        // Record the request details.
        // Form parameters are plain key-value pairs; for an application/json body,
        // request.getParameter() returns nothing, so read the method arguments instead.
        String ip = request.getRemoteAddr();
        String method = joinPoint.getSignature().getDeclaringTypeName() + "." + joinPoint.getSignature().getName();
        JSONObject jsonObject = new JSONObject();
        List<Object> argList = new ArrayList<>();
        String contentType = request.getHeader("Content-Type");
        if (contentType != null && contentType.contains("application/json")) {
            for (Object arg : joinPoint.getArgs()) {
                // request/response cannot be serialized with toJSON
                if (arg instanceof HttpServletRequest) {
                    argList.add("request");
                } else if (arg instanceof HttpServletResponse) {
                    argList.add("response");
                } else {
                    argList.add(JSONObject.toJSON(arg));
                }
            }
        } else {
            // record the request parameters as key-value pairs
            for (String key : request.getParameterMap().keySet()) {
                JSONObject js = new JSONObject();
                js.put(key, request.getParameter(key));
                argList.add(js);
            }
        }
        MDC.put("url", request.getRequestURL().toString());
        MDC.put("ip", ip);
        MDC.put("method", method);
        MDC.put("arg", JSON.toJSON(argList).toString());
        /* Invoke the target method */
        try {
            result = joinPoint.proceed();
            /* After-returning advice: log the result */
            jsonObject.put("message", result);
            log.info(jsonObject.toJSONString());
        } catch (Throwable e) {
            /* After-throwing advice: record the exception and the top of its stack */
            MDC.put("throwable", e.getMessage());
            StackTraceElement[] sta = e.getStackTrace();
            StringBuilder str = new StringBuilder();
            for (int i = 0; i < 15 && i < sta.length; i++) {
                str.append(sta[i]).append("\n");
            }
            log.error(str.toString());
            // rethrow so the exception still propagates to the caller
            throw e;
        } finally {
            // clear the MDC so the values do not leak to the next request on a pooled thread
            MDC.clear();
        }
        return result;
    }
}
Here I extended the logback output through the MDC, so these fields need to be added to the logback pattern as well, e.g.:
"ip": "%X{ip}",
"url": "%X{url}",
"method": "%X{method}",
"arg": "%X{arg}",
"throwable": "%X{throwable}"
%X{key} reads the value stored in the MDC under that key.
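Put together, the inner <pattern> of the kafkaAppender grows to something like this sketch (merge the new MDC fields with the fields already defined there, rather than replacing them):
<pattern>
    {
    "timestamp": "%d{yyyy-MM-dd'T'HH:mm:ss.SSSZ}",
    "traceId": "%X{X-B3-TraceId:-}",
    "spanId": "%X{X-B3-SpanId:-}",
    "logger": "%logger",
    "level": "%p",
    "message": "%msg",
    "stackTrace": "%ex",
    "ip": "%X{ip}",
    "url": "%X{url}",
    "method": "%X{method}",
    "arg": "%X{arg}",
    "throwable": "%X{throwable}"
    }
</pattern>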