- 10.100.2.21 node1
- 10.100.2.22 node2
- 10.100.2.23 node3
- zookeeper soft nofile 65536
- zookeeper hard nofile 65536
- zookeeper soft nproc 65536
- zookeeper hard nproc 65536
- kafka soft nofile 65536
- kafka hard nofile 65536
- kafka soft nproc 65536
- kafka hard nproc 65536
# Create dedicated service accounts for the two daemons.
useradd kafka
useradd zookeeper

# Directory layout: App = binaries, logs = log output, data = persistent state.
mkdir -p /srv/{App,logs,data}/{zookeeper,kafka}
chown -Rf kafka:kafka /srv/{App,logs,data}/kafka
chown -Rf zookeeper:zookeeper /srv/{App,logs,data}/zookeeper

# Append the ZooKeeper bin dir to the system-wide PATH.
# The heredoc delimiter is quoted so $PATH is written literally and expanded
# at login time, not frozen to this script's PATH at write time; the path
# also matches the /srv/App casing created above (filesystems are case-sensitive).
cat >> /etc/profile <<'EOF'
# append zk_env
export PATH=$PATH:/srv/App/zookeeper/bin
EOF
# ZooKeeper ensemble configuration (zoo.cfg) for the 3-node cluster.
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/srv/data/zookeeper
dataLogDir=/srv/logs/zookeeper
clientPort=2181
# NOTE(review): retaining 500 snapshots is unusually high (default is 3) — confirm this is intended
autopurge.snapRetainCount=500
autopurge.purgeInterval=24
# No whitespace after '=' — some parsers preserve leading spaces in the value.
# Each server.N id must match the node's myid file.
server.1=10.100.2.21:2888:3888
server.2=10.100.2.22:2888:3888
server.3=10.100.2.23:2888:3888
Create the myid file on every node; its value must match the server.N id in zoo.cfg (e.g. on node1: echo 1 > /srv/data/zookeeper/myid).
- init.d/zookeeper
#!/bin/bash
# chkconfig: 2345 20 90
# description: zookeeper
# processname: zookeeper
#
# SysV init wrapper for ZooKeeper: delegates start/stop/status/restart to
# zkServer.sh, executed as ${ZKUSER}. The chkconfig header above must use the
# '# chkconfig: ...' form (with a space) or chkconfig will not register it.

export JAVA_HOME=/srv/App/tools/java/jdk1.8.0_181

# NOTE(review): the setup notes create a dedicated 'zookeeper' user, but this
# script runs the service as root (and the dirs are chown'd back to root later).
ZKUSER="root"
ZKHOME="/srv/app/zookeeper"  # NOTE(review): dirs were created as /srv/App — confirm casing

# Use an explicit shell and -c so the command runs even if the target
# account has a non-interactive/nologin shell.
case "$1" in
  start)   su -s /bin/bash "${ZKUSER}" -c "${ZKHOME}/bin/zkServer.sh start" ;;
  stop)    su -s /bin/bash "${ZKUSER}" -c "${ZKHOME}/bin/zkServer.sh stop" ;;
  status)  su -s /bin/bash "${ZKUSER}" -c "${ZKHOME}/bin/zkServer.sh status" ;;
  restart) su -s /bin/bash "${ZKUSER}" -c "${ZKHOME}/bin/zkServer.sh restart" ;;
  *)       echo "require start|stop|status|restart" >&2; exit 2 ;;
esac
# Re-own every ZooKeeper directory (binaries, data, logs) to root,
# matching ZKUSER="root" in the init script.
for zkdir in App data logs; do
  chown -Rf root:root "/srv/${zkdir}/zookeeper"
done
; Supervisor program definition: run the Kafka broker as the 'kafka' user
; and keep it restarted on failure.
[program:kafka]
command=/srv/App/kafka/bin/kafka-server-start.sh /srv/App/kafka/config/server.properties
user=kafka

; process lifecycle
autostart=true
autorestart=true
startsecs=5
startretries=3

; logging: merge stderr into stdout, rotate at 20 MB keeping 20 files
redirect_stderr=true
stdout_logfile=/srv/logs/supervisor/kafka_super.log
stdout_logfile_maxbytes=20MB
stdout_logfile_backups=20
- docker pull openresty/openresty:alpine
#!/bin/bash
# Pre-pull the ELK stack images for every version we deploy.
# Docker repository names must be lowercase: 'Elasticsearch:x.y.z' is
# rejected with "invalid reference format: repository name must be lowercase".
set -e

for version in 6.4.2 6.7.0 7.2.0 7.4.0 7.4.2 7.5.0; do
  echo ">>>>>> ${version}>>>>>>>"
  docker pull "elasticsearch:${version}"
  docker pull "logstash:${version}"
  docker pull "kibana:${version}"
  # docker pull "filebeat:${version}"  # no official 'filebeat' image on Docker Hub — it is published under docker.elastic.co
done

# The loop already pulls 6.4.2, 6.7.0, 7.4.0 and 7.5.0, so the former
# block of duplicated one-off 'docker pull Elasticsearch:...' lines was removed.
#!/bin/bash
# 20170926
# Baseline host firewall: default-deny inbound, allow outbound and forward,
# then whitelist management, service and infrastructure traffic.

iptables -F
iptables -X
iptables -P INPUT DROP      # default-deny inbound; explicit ACCEPT rules follow
iptables -P OUTPUT ACCEPT   # allow all outbound
iptables -P FORWARD ACCEPT  # allow forwarding by default

## Blocking a specific IP — examples:
# 1. block 10.10.10.10 from nginx:     iptables -A INPUT -s 10.10.10.10 -p tcp --dport 80 -j DROP
# 2. block 10.10.10.10 from all ports: iptables -A INPUT -s 10.10.10.10 -j DROP
### block list ###
# iptables -A INPUT -s 10.10.10.10 -p tcp --dport 80 -j DROP

iptables -A INPUT -s 192.168.33.0/24 -p tcp --dport 22 -j ACCEPT    # SSH from mgmt network
iptables -A INPUT -s 192.168.35.0/24 -p tcp --dport 22 -j ACCEPT    # SSH from mgmt network
iptables -A INPUT -s 172.19.30.251 -p tcp --dport 22 -j ACCEPT      # Jenkins auto-updates the virus DB files
iptables -A INPUT -s 192.168.35.11 -j ACCEPT
iptables -A INPUT -s 100.100.100.225 -p tcp --sport 7000 -j ACCEPT  # ELK filebeat -> Redis port replies

# The multiport match uses the plural options --dports/--sports.
iptables -A INPUT -m multiport -p tcp --dports 80,443 -j ACCEPT     # inbound HTTP/HTTPS to nginx
iptables -A INPUT -m multiport -p tcp --sports 80,443 -j ACCEPT     # replies from yum mirrors
iptables -A INPUT -p udp --sport 53 -j ACCEPT                       # DNS replies
iptables -A INPUT -m multiport -p udp --sports 123,323 -j ACCEPT    # NTP/chrony time-sync replies
iptables -A INPUT -p icmp -j ACCEPT                                 # allow ping
iptables -A INPUT -i lo -p all -j ACCEPT                            # loopback

iptables -A FORWARD -f -m limit --limit 100/s --limit-burst 100 -j ACCEPT   # rate-limit IP fragments (anti-flood), 100/s
iptables -A FORWARD -p icmp -m limit --limit 1/s --limit-burst 10 -j ACCEPT # rate-limit forwarded ICMP: 1/s, burst 10
iptables -A FORWARD -m state --state INVALID -j DROP                # drop invalid forwarded packets

service iptables save
service iptables restart
chkconfig iptables on
iptables -nv -L
来源: http://www.bubuko.com/infodetail-3353429.html