zk-kafka安裝記錄

#!/bin/bash
# zk-kafka installation notes: 3-node ZooKeeper ensemble + Kafka broker setup.
# Nodes: node1/node2/node3 = 10.100.2.21/22/23.

# --- /etc/hosts entries for the cluster nodes ---
cat >> /etc/hosts <<'EOF'
10.100.2.21 node1
10.100.2.22 node2
10.100.2.23 node3
EOF

# --- /etc/security/limits.conf: raise fd/proc limits for both service users ---
cat >> /etc/security/limits.conf <<'EOF'
zookeeper soft nofile 65536
zookeeper hard nofile 65536
zookeeper soft nproc  65536
zookeeper hard nproc  65536
kafka     soft nofile 65536
kafka     hard nofile 65536
kafka     soft nproc  65536
kafka     hard nproc  65536
EOF

# Service accounts and directory layout (app = binaries, logs, data).
useradd kafka
useradd zookeeper
mkdir -p /srv/{app,logs,data}/{zookeeper,kafka}
chown -Rf kafka:kafka /srv/{app,logs,data}/kafka
chown -Rf zookeeper:zookeeper /srv/{app,logs,data}/zookeeper

# Append zookeeper bin dir to the login PATH.
# NOTE: \$PATH must stay literal ($ escaped) so it is expanded at login time;
# the original wrote an expanded snapshot of the *current* PATH into /etc/profile.
echo -e "# append zk_env\nexport PATH=\$PATH:/srv/app/zookeeper/bin" >> /etc/profile

# --- zoo.cfg (ensemble config; path assumed under ZKHOME — TODO confirm) ---
# No whitespace is allowed after 'server.N=': the original had
# 'server.1= 10.100.2.21:...' which ZooKeeper fails to parse.
cat > /srv/app/zookeeper/conf/zoo.cfg <<'EOF'
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/srv/data/zookeeper
dataLogDir=/srv/logs/zookeeper
clientPort=2181
autopurge.snapRetainCount=500
autopurge.purgeInterval=24
server.1=10.100.2.21:2888:3888
server.2=10.100.2.22:2888:3888
server.3=10.100.2.23:2888:3888
EOF

# Each node needs its own myid matching its server.N entry
# ("需要創建myid" in the original notes): 1 on node1, 2 on node2, 3 on node3.
echo 1 > /srv/data/zookeeper/myid

# --- /etc/init.d/zookeeper: SysV init wrapper around zkServer.sh ---
# Quoted heredoc delimiter keeps $1/$ZKHOME literal in the written file.
cat > /etc/init.d/zookeeper <<'EOF'
#!/bin/bash
# chkconfig: 2345 20 90
# description: zookeeper
# processname: zookeeper
export JAVA_HOME=/srv/app/tools/java/jdk1.8.0_181
ZKUSER="root"
ZKHOME="/srv/app/zookeeper"
# Use 'su -c' so the whole command runs as ZKUSER; the original passed the
# script path as a bare argument to su, relying on shell pass-through.
case "$1" in
  start)   su "${ZKUSER}" -c "${ZKHOME}/bin/zkServer.sh start"   ;;
  stop)    su "${ZKUSER}" -c "${ZKHOME}/bin/zkServer.sh stop"    ;;
  status)  su "${ZKUSER}" -c "${ZKHOME}/bin/zkServer.sh status"  ;;
  restart) su "${ZKUSER}" -c "${ZKHOME}/bin/zkServer.sh restart" ;;
  *)       echo "require start|stop|status|restart" ;;
esac
EOF
chmod +x /etc/init.d/zookeeper

# Notes run zookeeper as root via the init script, hence the ownership flip.
chown -Rf root:root /srv/{app,data,logs}/zookeeper

# --- supervisord program for kafka (ini path assumed — TODO confirm) ---
cat > /etc/supervisord.d/kafka.ini <<'EOF'
[program:kafka]
command = /srv/app/kafka/bin/kafka-server-start.sh /srv/app/kafka/config/server.properties
autostart = true
startsecs = 5
autorestart = true
startretries = 3
user = kafka
redirect_stderr = true
stdout_logfile_maxbytes = 20MB
stdout_logfile_backups = 20
stdout_logfile = /srv/logs/supervisor/kafka_super.log
EOF

docker pull openresty/openresty:alpine

#!/bin/bash
# Pull the ELK stack images for every version of interest.
# (The original also repeated single 'docker pull elasticsearch:X' lines after
# this loop; those were redundant duplicates and are dropped.)
for version in 6.4.2 6.7.0 7.2.0 7.4.0 7.4.2 7.5.0; do
  echo ">>>>>> ${version} >>>>>>>"
  docker pull "elasticsearch:${version}"
  docker pull "logstash:${version}"
  docker pull "kibana:${version}"
  # docker pull "filebeat:${version}"
done
#!/bin/bash
# 2017-09-26 — baseline iptables firewall for this host.
# (The original line opened with another round of 'docker pull elasticsearch:*'
# commands; they duplicate the version loop earlier in these notes and are
# dropped. The original also had a malformed shebang '#bin/bash' — fixed.)
#
# NOTE(review): the original set 'iptables -P INPUT DROP' *before* adding any
# ACCEPT rule, which kills a remote SSH session mid-script. The ACCEPT rules
# are now installed first and the INPUT policy is flipped to DROP last.

iptables -F
iptables -X
iptables -P OUTPUT ACCEPT    # default allow outbound
iptables -P FORWARD ACCEPT   # default allow forwarding

## Blocking an IP
# Examples:
# 1. Block 10.10.10.10 from reaching nginx:
#      iptables -A INPUT -s 10.10.10.10 -p tcp --dport 80 -j DROP
# 2. Block 10.10.10.10 entirely (no port allowed):
#      iptables -A INPUT -s 10.10.10.10 -j DROP
### block zone ###
# iptables -A INPUT -s 10.10.10.10 -p tcp --dport 80 -j DROP

# SSH only from management networks.
iptables -A INPUT -s 192.168.33.0/24 -p tcp --dport 22 -j ACCEPT
iptables -A INPUT -s 192.168.35.0/24 -p tcp --dport 22 -j ACCEPT
iptables -A INPUT -s 172.19.30.251   -p tcp --dport 22 -j ACCEPT
# Jenkins auto-updates the antivirus signature files.
iptables -A INPUT -s 192.168.35.11 -j ACCEPT
# ELK: filebeat connects to the redis port.
iptables -A INPUT -s 100.100.100.225 -p tcp --sport 7000 -j ACCEPT

# multiport canonically uses --dports/--sports (plural); the original used the
# singular forms, which only some iptables builds accept as aliases.
# Allow dport 80,443 — external requests to nginx.
iptables -A INPUT -m multiport -p tcp --dports 80,443 -j ACCEPT
# Allow sport 80,443 — replies from yum repositories.
iptables -A INPUT -m multiport -p tcp --sports 80,443 -j ACCEPT
# Allow DNS replies for local resolution.
iptables -A INPUT -p udp --sport 53 -j ACCEPT
# Allow NTP/chrony time-sync replies.
iptables -A INPUT -m multiport -p udp --sports 123,323 -j ACCEPT
# Allow ping.
iptables -A INPUT -p icmp -j ACCEPT
# Allow loopback traffic.
iptables -A INPUT -i lo -p all -j ACCEPT
# NOTE(review): a single '-m state --state ESTABLISHED,RELATED' rule would
# cover the sport-based reply rules above more robustly — confirm before use.

# Rate-limit IP fragments to 100/s to blunt fragment floods.
iptables -A FORWARD -f -m limit --limit 100/s --limit-burst 100 -j ACCEPT
# Rate-limit forwarded ICMP to 1/s, burst threshold 10.
iptables -A FORWARD -p icmp -m limit --limit 1/s --limit-burst 10 -j ACCEPT
# Drop invalid forwarded packets.
iptables -A FORWARD -m state --state INVALID -j DROP

# Flip the default inbound policy to DROP only after the allow rules exist.
iptables -P INPUT DROP

service iptables save
service iptables restart
chkconfig iptables on
iptables -nv -L
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章