Docker部署运行ELK

mkdir -p /home/docker/elasticsearch/data
mkdir -p /home/docker/elasticsearch/logs
mkdir -p /home/docker/logstash

在 /home/docker/logstash目录下 vi logstash.conf



# Consume application log events from Kafka and feed them into the pipeline.
input {
    kafka {
        bootstrap_servers => ["192.168.1.80:9092"] # Kafka broker address (NOT the ZooKeeper address)
        group_id => "applogs-msg" # consumer group id (user-defined)
        topics => ["applogs"]  # Kafka topic name
        consumer_threads => 5 
        decorate_events => true
        codec => "json"
        tags => ["applogs1"]
        
      }
   # You can add more kafka {} sections like the one below to consume extra
   # topics with their own tags; remove it if a single topic is enough.
   kafka {
        bootstrap_servers => ["192.168.1.80:9092"] # Kafka broker address (NOT the ZooKeeper address)
        group_id => "applogs-msg" # consumer group id (user-defined)
        topics => ["applogs1"]  # Kafka topic name
        consumer_threads => 5
        decorate_events => true
        codec => "json"
        tags => ["applogs2"]
      }
}

# Ship each event to Elasticsearch, using its tag plus the event date as the
# index name; also echo every event to stdout (rubydebug) for debugging.
output{
        elasticsearch{
                hosts=>["192.168.1.80:9200"]  
                # NOTE(review): "tags" is an array field — sprintf joins
                # multiple tags with commas, which would produce an odd index
                # name; acceptable here only because each input above attaches
                # exactly one tag per event. Confirm if more tags are added.
                index => "%{tags}-%{+YYYY.MM.dd}"
        }
        stdout{codec => rubydebug}
}

在/home/docker/logstash目录下, vi logstash.yml

path.config: /etc/logstash/conf.d

在/home/docker 目录下,vi docker-elk.yml

# Single-node ELK stack (Elasticsearch + Logstash + Kibana 7.4.2).
version: '3.4'
services:
  elasticsearch:
    image: elasticsearch:7.4.2
    container_name: elasticsearch-single
    restart: always
    # Raise process/file/memlock limits inside the container; Elasticsearch
    # refuses to start when these are too low (see bootstrap checks).
    ulimits:
      nproc: 65535
      nofile:
        soft: 40000
        hard: 65535
      memlock:
        soft: -1
        hard: -1
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      - TZ=Asia/Shanghai
      - "ES_JAVA_OPTS=-Xms1024m -Xmx1024m"
      - bootstrap.memory_lock=true
      - http.host=0.0.0.0
      #- cluster.initial_master_nodes=["node-1"]
      #- bootstrap.memory_lock=false
      - bootstrap.system_call_filter=false
      - discovery.type=single-node  # single-node mode, skips master election
    volumes:
      #- /home/docker/elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /home/docker/elasticsearch/data:/usr/share/elasticsearch/data
      - /home/docker/elasticsearch/logs:/usr/share/elasticsearch/logs
      #- /home/docker/elasticsearch/plugins:/usr/share/elasticsearch/plugins

  logstash:
    image: logstash:7.4.2
    container_name: logstash-single
    restart: always
    #network_mode: "bridge"
    #command: logstash -f /config-dir
    ports:
      - "5044:5044"
    volumes:
      - /home/docker/logstash/logstash.conf:/opt/logstash/config/logstash.conf
      - /home/docker/logstash/logstash.yml:/opt/logstash/config/logstash.yml
      - /home/docker/logstash/logstash.conf:/etc/logstash/conf.d/logstash.conf
      - /home/docker/logstash/logstash.yml:/etc/logstash/logstash.yml
    environment:
      # container time zone
      TZ: Asia/Shanghai
    # external_links syntax is "<container_name>:<alias>". The original file
    # had the order reversed ("elasticsearch:elasticsearch-single"): no
    # container named "elasticsearch" exists, so the link cannot resolve.
    # Linking elasticsearch-single under the alias "elasticsearch" gives
    # Logstash/Kibana the hostname their default configs expect.
    external_links:
      - elasticsearch-single:elasticsearch

  kibana:
    image: kibana:7.4.2
    container_name: kibana-single
    restart: always
    #network_mode: "bridge"
    ports:
      - "5601:5601"
    external_links:
      - elasticsearch-single:elasticsearch

最后docker-compose -f docker-elk.yml up -d

部署遇到的问题

下面这些是 /etc/security/limits.conf 的示例配置(具体说明见后文):

* soft nproc 11000
* hard nproc 11000
* soft nofile 4100
* hard nofile 4100
* - nofile 65536
* - memlock unlimited

max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
这个错误,如果是linux直接部署es的话
vi /etc/sysctl.conf
添加
vm.max_map_count=655360

max file descriptors [65535] for elasticsearch process is too low, increase to at least [65536]
这个错误,如果是linux直接部署es的话
vi /etc/security/limits.conf
* soft nproc 11000
* hard nproc 11000
* soft nofile 4100
* hard nofile 4100
* - nofile 65536
* - memlock unlimited

执行 sysctl -p 立即生效

docker部署的话,以上这里两个错误需要修改配置
 ulimits:
    nproc: 65535
    nofile:
      soft: 40000
      hard: 65535 
    memlock:
      soft: -1
      hard: -1
docker-compose.yml里加上这个

the default discovery settings are unsuitable for production use; at least one of [discovery.seed_hosts, discovery.seed_providers, cluster.initial_master_nodes] must be configured
这个错误是es配置的问题
docker-compose.yml加上这个,或者 在es的配置文件里加
environment:
  - bootstrap.system_call_filter=false
  - cluster.initial_master_nodes=node-1



發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章