# cat /etc/issue
CentOS release 6.5 (Final)
> vim /etc/elasticsearch/elasticsearch.yml
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
# network.host: 127.0.0.1
network.host: 192.168.20.50
#
# Set a custom port for HTTP:
#
http.port: 9200
...
bootstrap.system_call_filter: false
注意:使用本地 IP(127.0.0.1)時,Elasticsearch 進入 dev mode,只能從本機訪問;此時 bootstrap checks 失敗只顯示警告,不會阻止啓動。
ERROR: bootstrap checks failed
max file descriptors [65535] for elasticsearch process likely too low, increase to at least [65536]
memory locking requested for elasticsearch process but memory is not locked
max number of threads [1024] for user [jason] likely too low, increase to at least [2048]
max virtual memory areas vm.max_map_count [65530] likely too low, increase to at least [262144]
system call filters failed to install; check the logs and fix your configuration or disable system call filters at your own risk
需要針對這些參數進行設置:
> vim /etc/security/limits.conf
...
elasticsearch hard nofile 65536    # 針對 max file descriptors
elasticsearch soft nproc 2048      # 針對 max number of threads

> vim /etc/sysctl.conf
...
vm.max_map_count=262144            # 針對 max virtual memory areas(改完執行 sysctl -p 生效)

> vim /etc/elasticsearch/elasticsearch.yml
...
bootstrap.system_call_filter: false
# 針對 system call filters failed to install,
# 參見 https://www.elastic.co/guide/en/elasticsearch/reference/current/system-call-filter-check.html
# Configure Elasticsearch to start automatically when the system boots up
# (CentOS 6 uses SysV init, hence chkconfig).
sudo chkconfig --add elasticsearch
# Start / stop the service. NOTE: in the original notes these three commands
# were fused onto one line, so everything after the '#' comment was never run.
sudo -i service elasticsearch start
sudo -i service elasticsearch stop
日誌: /var/log/elasticsearch/
# Install Logstash from the downloaded RPM (verbose, install).
# Added sudo for consistency with the Filebeat installs below —
# rpm package installation requires root.
sudo rpm -vi logstash-5.2.0.rpm
這個例子裏使用 Filebeat 將測試用的 Apache web log 作爲 Logstash 的輸入,解析並寫入數據到 Elasticsearch 中。
> vim /etc/logstash/conf.d/first-pipeline.conf
input {
    beats {
        port => "5043"
    }
}
filter {
    grok {
        match => { "message" => "%{COMBINEDAPACHELOG}" }
    }
    geoip {
        source => "clientip"
    }
}
output {
    elasticsearch {
        hosts => [ "192.168.20.50:9200" ]
        index => "testlog-%{+YYYY.MM.dd}"
    }
}
grok
可以解析未結構化的日誌數據,Grok filter pattern 測試網站:
http://grokdebug.herokuapp.com/
%{COMBINEDAPACHELOG}
%{IPORHOST:clientip} %{USER:ident} %{USER:auth} \[%{HTTPDATE:timestamp}\] "(?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})" %{NUMBER:response} (?:%{NUMBER:bytes}|-) %{QS:referrer} %{QS:agent}
啓動:
此處建議手動建立 service 文件進行服務管理。
# Download the Filebeat 5.2.0 RPM (follow redirects, keep remote filename)
# and install it as root. These were originally fused onto one line, which
# would have passed 'sudo rpm -vi ...' to curl as extra arguments.
curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-5.2.0-x86_64.rpm
sudo rpm -vi filebeat-5.2.0-x86_64.rpm
> vim /etc/filebeat/filebeat.yml
filebeat.prospectors:
- input_type: log
  paths:
    - /var/log/logstash-tutorial.log   # 之前下載的測試文件
    #- /var/log/*.log
    #- c:\programdata\elasticsearch\logs\*
...
#----------------------------- Logstash output --------------------------------
output.logstash:
  # The Logstash hosts
  #hosts: ["localhost:5044"]
  hosts: ["localhost:5043"]
# Download and install Filebeat, then start it via its SysV init script.
# Originally all three commands were collapsed onto a single line and would
# not have run correctly; each must be its own command.
curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-5.2.0-x86_64.rpm
sudo rpm -vi filebeat-5.2.0-x86_64.rpm
sudo /etc/init.d/filebeat start
> vim /etc/kibana/kibana.yml
server.host: "192.168.20.50"
elasticsearch.url: "http://192.168.20.50:9200"
> sudo chkconfig --add kibana   # 設置自動啓動
> sudo -i service kibana start
> sudo -i service kibana stop