Adding an ELK log system to the EHR and OA systems: a full walkthrough of building Elasticsearch + Logstash + Kibana + Filebeat

1. Create a virtual machine

2. Install Docker and docker-compose

# Docker installation
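The repository's GPG key usually has to be imported before adding the apt source, or apt-get update will reject it; a minimal sketch, assuming the Aliyun mirror layout used below:

curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | apt-key add -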

add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"

apt-get update

apt-get install docker-ce=5:19.03.11~3-0~ubuntu-xenial  

Edit /etc/docker/daemon.json:

{
  "registry-mirrors": ["https://dockerhub.azk8s.cn"],
  "data-root": "/data/docker",
  "metrics-addr": "0.0.0.0:9323",
  "experimental": true,
  "bip": "172.31.0.1/24",
  "default-address-pools": [
    {"base": "172.31.0.0/16", "size": 24}
  ]
}

systemctl enable docker

systemctl start docker
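To confirm the daemon picked up daemon.json, query it back; the expected value assumes the config above:

docker info --format '{{ .DockerRootDir }}'   # expect /data/docker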

# Download docker-compose

curl -L https://github.com/docker/compose/releases/download/1.24.1/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose

chmod +x /usr/local/bin/docker-compose
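Verify the binary works:

docker-compose --version   # should report 1.24.1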

3. Write the docker-compose file for Elasticsearch + Logstash + Kibana

Note: bring Elasticsearch up first, then exec into the container to create the built-in users and enable access authentication.

In the ES config, set:

xpack.security.enabled: true
discovery.type: single-node

Once the container is up, exec into it and run:

./bin/elasticsearch-setup-passwords auto

If the generated passwords do not take effect, do not put these settings in the compose environment section; write them into the config file and mount it into the container instead.
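A minimal sketch of that bootstrap sequence, assuming the service name elasticsearch from the compose file below:

sudo docker-compose up -d elasticsearch
sudo docker-compose exec elasticsearch bin/elasticsearch-setup-passwords auto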

version: '2.3'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:6.8.10
    volumes:
      - esdata:/usr/share/elasticsearch/data:rw
      - /etc/localtime:/etc/localtime
      - /var/log/elasticsearch:/usr/share/elasticsearch/logs
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    environment:
      - TZ=Asia/Shanghai
      - _JAVA_OPTIONS=-Xmx1024m -Xms1024m
      - bootstrap.memory_lock=false
      - cluster.name=elk-logs
      - network.host=0.0.0.0
      - xpack.security.enabled=true
      - discovery.type=single-node
      - node.master=true
      - node.data=true
      - discovery.zen.minimum_master_nodes=1
      - discovery.zen.ping.unicast.hosts=10.0.0.11:9300
    ulimits:
      memlock:
        soft: -1
        hard: -1
    network_mode: host
    restart: unless-stopped
  logstash:
    image: docker.elastic.co/logstash/logstash:6.8.10
    volumes:
      - ./logstash/pipeline:/usr/share/logstash/pipeline
    network_mode: host
    environment:
      - TZ=Asia/Shanghai
      - LS_JAVA_OPTS=-Xmx256m -Xms256m
      - node.name=logstash
      - http.host=0.0.0.0
      - pipeline.id=pipeline
      - xpack.monitoring.elasticsearch.url=http://10.0.0.11:9200
      #- xpack.monitoring.elasticsearch.username=logstash_system
      #- xpack.monitoring.elasticsearch.password=""
      - xpack.monitoring.enabled=false
    restart: unless-stopped
  kibana:
    image: docker.elastic.co/kibana/kibana:6.8.10
    volumes:
      - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
    network_mode: host
    environment:
      - SERVER_NAME=kibana
      - XPACK_SECURITY_ENABLED=true
      - XPACK_MONITORING_ENABLED=true
      - ELASTICSEARCH_HOSTS=http://10.0.0.11:9200
      #- ELASTICSEARCH_USERNAME=elastic
      #- ELASTICSEARCH_PASSWORD=""
    restart: unless-stopped
  logstash-nginx:
    image: docker.elastic.co/logstash/logstash:6.8.10
    volumes:
      - ./logstash/pipeline_nginx:/usr/share/logstash/pipeline
    network_mode: host
    environment:
      - TZ=Asia/Shanghai
      - LS_JAVA_OPTS=-Xmx256m -Xms256m
      - node.name=logstash-nginx
      - http.host=0.0.0.0
      - pipeline.id=pipeline_nginx
      - xpack.monitoring.elasticsearch.url=http://10.0.0.11:9200
      #- xpack.monitoring.elasticsearch.username=logstash_system
      #- xpack.monitoring.elasticsearch.password=""
      - xpack.monitoring.enabled=false
    restart: unless-stopped
volumes:
  esdata:

 

Docker will create the volume's data under its data-root (/data/docker, per daemon.json above).
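To see where the named volume actually landed on disk (the volume name is prefixed with the compose project name, assumed here to be the directory name elk):

docker volume inspect elk_esdata --format '{{ .Mountpoint }}'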

This compose file creates four containers. Configure a pipeline for the EHR Logstash and the OA Logstash respectively, to match and collect each system's logs:

(1) EHR

input {
    beats {
        port => 5044
    }
}
filter {
    # ignore log comments
    if [message] =~ "^#" {
        drop {}
    }
    # check that fields match your IIS log settings
    grok {
        #remove_field => ["message"]
        match => ["message", "%{TIMESTAMP_ISO8601:log_timestamp} %{IPORHOST:s-ip} %{WORD:cs-method} %{NOTSPACE:cs-uri-stem} %{NOTSPACE:cs-uri-query} %{NUMBER:s-port} %{NOTSPACE:cs-username} %{IPORHOST:c-ip} %{NOTSPACE:cs-useragent} %{NOTSPACE:referer} %{NUMBER:sc-status} %{NUMBER:sc-substatus} %{NUMBER:sc-win32-status} %{NUMBER:time-taken:int}"]
    }
    # set the event timestamp from the log
    # https://www.elastic.co/guide/en/logstash/current/plugins-filters-date.html
    date {
        match => ["log_timestamp", "YYYY-MM-dd HH:mm:ss"]
        target => "@timestamp"
        #remove_field => ["log_timestamp"]
    }
    # matches the big, long nasty useragent string to the actual browser name, version, etc
    # https://www.elastic.co/guide/en/logstash/current/plugins-filters-useragent.html
    useragent {
        source => "cs-useragent"
        prefix => "browser_"
    }
}
output {
    elasticsearch {
        hosts => ["10.0.0.11:9200"]
        user => "elastic"
        password => ""
        index => "ehr-access_log-%{+YYYYMMdd}"
    }
    # output to console
}
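Once Filebeat (step 4) starts shipping, the daily indices should appear; a quick check against the ES API, substituting the real elastic password:

curl -u elastic:<password> 'http://10.0.0.11:9200/_cat/indices/ehr-*?v'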

 

(2) OA

input {
    beats {
        port => 5045
    }
}
filter {
    if [message] =~ "^#" {
        drop {}
    }
    if [type] == "nginx_access" {
        grok {
            match => ["message", "%{IPORHOST:remote_addr} - %{HTTPDUSER:remote_user} \[%{HTTPDATE:time_local}\] \"(?:%{WORD:method} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})\" %{NUMBER:status} (?:%{NUMBER:body_bytes}|-) %{QS:referrer} %{QS:user_agent} %{QS:x_forward_for}"]
        }
    }
    if [type] == "nginx_error" {
        grok {
            match => ["message", "(?<time_local>%{YEAR}[./-]%{MONTHNUM}[./-]%{MONTHDAY}[- ]%{TIME}) \[%{LOGLEVEL:log_level}\] %{POSINT:pid}#%{NUMBER}: %{GREEDYDATA:error_message}(?:, client: (?<client>%{IP}|%{HOSTNAME}))(?:, server: %{IPORHOST:server}?)(?:, request: %{QS:request})?(?:, upstream: (?<upstream>\"%{URI}\"|%{QS}))?(?:, host: %{QS:request_host})?(?:, referrer: \"%{URI:referrer}\")?",
                      "message", "(?<time_local>%{YEAR}[./-]%{MONTHNUM}[./-]%{MONTHDAY}[- ]%{TIME}) \[%{LOGLEVEL:log_level}\]\s{1,}%{GREEDYDATA:error_message}"
            ]
        }
    }
    # the grok patterns above capture the timestamp as time_local (not log_timestamp),
    # and the access and error logs use different formats
    date {
        match => ["time_local", "dd/MMM/yyyy:HH:mm:ss Z", "yyyy/MM/dd HH:mm:ss"]
        target => "@timestamp"
        remove_field => ["time_local"]
    }
    useragent {
        # user_agent is the field captured above (cs-useragent does not exist in this pipeline)
        source => "user_agent"
        prefix => "browser_"
    }
}
output {
    elasticsearch {
        hosts => ["10.0.0.11:9200"]
        user => "elastic"
        password => ""
        index => "oa-%{type}-log-%{+YYYYMMdd}"
    }
}
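And the matching check for the OA side once its Filebeat is running:

curl -u elastic:<password> 'http://10.0.0.11:9200/_cat/indices/oa-*?v'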

 

(3) The Kibana configuration file to mount

server.port: 5601
server.host: "10.0.0.11"
elasticsearch.url: "http://10.0.0.11:9200"
elasticsearch.username: "kibana"
elasticsearch.password: ""

Run sudo docker-compose up -d to create and start all of the containers.
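A quick smoke test once everything is up (the passwords come from the setup-passwords step earlier):

curl -u elastic:<password> 'http://10.0.0.11:9200/_cluster/health?pretty'
curl -u elastic:<password> 'http://10.0.0.11:5601/api/status'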

4. Install Filebeat on each log host (Windows/IIS for EHR, CentOS for OA)

(1) EHR on Windows

https://www.elastic.co/cn/downloads/beats/filebeat 

Note: keep Elasticsearch, Logstash, Kibana, and Filebeat on the same version (6.8.10 here).

Download the Windows build to use with IIS.

Filebeat is configured with the locations of the logs to read.

 

Unpack into the default location and install with administrator privileges:

Extract the archive into C:\Program Files.
Rename the filebeat-6.8.10-windows-x86_64 directory to Filebeat.
Right-click the PowerShell icon and select "Run as administrator".
Run the following commands to install Filebeat as a Windows service:

PS > cd 'C:\Program Files\Filebeat'
PS C:\Program Files\Filebeat> .\install-service-filebeat.ps1

Note: script execution may need to be allowed explicitly first:
PowerShell.exe -ExecutionPolicy RemoteSigned -File .\install-service-filebeat.ps1

 

The installed service runs Filebeat with the following command line (adjust the paths if installed elsewhere):

"C:\Program Files\filebeat\filebeat.exe" -c "C:\Program Files\filebeat\filebeat.yml" -path.home "C:\Program Files\filebeat" -path.data "C:\Program Files\filebeat\data" -path.logs "C:\Program Files\filebeat\logs"

The configuration file:

filebeat.inputs:
- type: log
  enabled: true
  fields:
    type: pc
  fields_under_root: true
  paths:
    - C:\inetpub\logs\LogFiles\W3SVC1\*.log
- type: log
  enabled: true
  fields:
    type: mobile
  fields_under_root: true
  paths:
    - C:\inetpub\logs\LogFiles\W3SVC4\*.log
# - type: log
#   enabled: true
#   fields:
#     type: appLog
#   fields_under_root: true
#   paths:
#     - C:\HRLOG\*\*.txt
#   # join multi-line log entries into one event
#   multiline.pattern: ^\[
#   multiline.negate: true
#   multiline.match: after
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
setup.kibana:
  host: "10.0.0.11:5601"
  username: "kibana"
  password: ""
output.logstash:
  # the logstash output has no username/password options; those belong to output.elasticsearch
  hosts: ["10.0.0.11:5044"]
  worker: 4
  compression_level: 3
  bulk_max_size: 20480
xpack.monitoring:
  enabled: false
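Filebeat can validate the config and the connection to Logstash before the service is started; these subcommands exist in the 6.x CLI:

PS C:\Program Files\Filebeat> .\filebeat.exe test config -c .\filebeat.yml
PS C:\Program Files\Filebeat> .\filebeat.exe test output -c .\filebeat.yml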

(2) OA on CentOS

curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.8.10-linux-x86_64.tar.gz

tar xzvf filebeat-6.8.10-linux-x86_64.tar.gz

Give filebeat.yml the right ownership: sudo chown root:root filebeat.yml

The configuration file is as follows:

filebeat.inputs:
- type: log
  enabled: true
  fields:
    type: nginx_access
  fields_under_root: true
  paths:
    - /usr/local/nginx/logs/access.log
- type: log
  enabled: true
  fields:
    type: nginx_error
  fields_under_root: true
  paths:
    - /usr/local/nginx/logs/error.log
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
setup.kibana:
  host: "10.0.0.11:5601"
  username: "kibana"
  password: ""
output.logstash:
  # the logstash output has no username/password options; those belong to output.elasticsearch
  hosts: ["10.0.0.11:5045"]
  worker: 4
  compression_level: 3
  bulk_max_size: 20480
xpack.monitoring:
  enabled: false
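As on Windows, the config and connectivity can be checked before running:

./filebeat test config -c filebeat.yml
./filebeat test output -c filebeat.yml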

sudo ./filebeat -e -c filebeat.yml

Create a systemd unit so Filebeat starts automatically (e.g. /etc/systemd/system/filebeat.service):

[Unit]
Description=filebeat
Wants=network-online.target
After=network-online.target

[Service]
User=root
ExecStart=/home/pupumall/filebeat/filebeat -e -c /home/pupumall/filebeat/filebeat.yml

[Install]
WantedBy=multi-user.target

 

sudo systemctl daemon-reload 

sudo systemctl enable filebeat.service

sudo systemctl start filebeat
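Confirm the unit is healthy and tail its output:

sudo systemctl status filebeat
sudo journalctl -u filebeat -f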

 

5. Log in to Kibana and configure roles and users

Set up the index patterns and visualizations.

Pay attention to how the logs are filtered and matched.
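Roles and users can also be created against the 6.x security API instead of clicking through the Kibana UI; a minimal sketch, where the role name logs_reader and user log_viewer are chosen only for illustration:

curl -u elastic:<password> -H 'Content-Type: application/json' -X PUT 'http://10.0.0.11:9200/_xpack/security/role/logs_reader' -d '{"indices":[{"names":["ehr-*","oa-*"],"privileges":["read","view_index_metadata"]}]}'
curl -u elastic:<password> -H 'Content-Type: application/json' -X PUT 'http://10.0.0.11:9200/_xpack/security/user/log_viewer' -d '{"password":"<new-password>","roles":["logs_reader","kibana_user"]}'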
