# node1
docker run -d --name kafka --net host --restart always \
  -e KAFKA_CFG_ZOOKEEPER_CONNECT=192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181 \
  -e KAFKA_BROKER_ID=1 \
  -e KAFKA_CFG_LISTENERS=PLAINTEXT://192.168.3.17:9092 \
  -e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://192.168.3.17:9092 \
  -e ALLOW_PLAINTEXT_LISTENER=yes \
  -e KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true \
  -v /root/kafka:/bitnami/kafka \
  bitnami/kafka:2.8.0

# node2
docker run -d --name kafka --net host --restart always \
  -e KAFKA_CFG_ZOOKEEPER_CONNECT=192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181 \
  -e KAFKA_BROKER_ID=2 \
  -e KAFKA_CFG_LISTENERS=PLAINTEXT://192.168.3.19:9092 \
  -e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://192.168.3.19:9092 \
  -e ALLOW_PLAINTEXT_LISTENER=yes \
  -e KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true \
  -v /root/kafka:/bitnami/kafka \
  bitnami/kafka:2.8.0

# node3
docker run -d --name kafka --net host --restart always \
  -e KAFKA_CFG_ZOOKEEPER_CONNECT=192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181 \
  -e KAFKA_BROKER_ID=3 \
  -e KAFKA_CFG_LISTENERS=PLAINTEXT://192.168.3.20:9092 \
  -e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://192.168.3.20:9092 \
  -e ALLOW_PLAINTEXT_LISTENER=yes \
  -e KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true \
  -v /root/kafka:/bitnami/kafka \
  bitnami/kafka:2.8.0
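
If everything started correctly, each broker registers itself in Zookeeper. A quick sanity check from any node (a sketch, assuming the Zookeeper ensemble described further down is already running and the Bitnami image's default install path):

# Should print the registered broker IDs, e.g. [1, 2, 3]
docker exec kafka /opt/bitnami/kafka/bin/zookeeper-shell.sh 192.168.3.17:2181 ls /brokers/ids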

# Two UI options
docker run -d -p 18080:8889 --name ui freakchicken/kafka-ui-lite
docker run -d -p 9000:9000 --name ui -e ZK_HOSTS=192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181 solsson/kafka-manager

# Test commands
./kafka-topics.sh --create --zookeeper 192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181 --replication-factor 1 --partitions 1 --topic first_kafka

./kafka-console-producer.sh --broker-list 192.168.3.17:9092,192.168.3.19:9092,192.168.3.20:9092 --topic first_kafka

./kafka-console-consumer.sh --bootstrap-server 192.168.3.17:9092,192.168.3.19:9092,192.168.3.20:9092 --topic first_kafka --from-beginning

./kafka-topics.sh --describe --zookeeper 192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181 --topic first_kafka

./kafka-topics.sh --list --zookeeper 192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181

./kafka-topics.sh --delete --zookeeper 192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181 --topic first_kafka

vim /etc/hosts

Configure the hosts file on every machine.
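
For example, with the three machines used in this article (the hostnames node1/node2/node3 are illustrative; use whatever names fit your environment):

# /etc/hosts -- identical entries on all three machines
192.168.3.17 node1
192.168.3.19 node2
192.168.3.20 node3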

ssh-keygen -t rsa

Run this on every machine; just press Enter through all the prompts.

ssh-copy-id -i nodeName

Run this on every machine, entering the password of the corresponding machine.

ssh-copy-id -i node1
ssh-copy-id -i node2
ssh-copy-id -i node3
...
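
After the keys have been copied, every machine should be able to log into the others without a password. A quick check (assuming the hostnames above):

# Should print the remote hostname without asking for a password
ssh node2 hostname
ssh node3 hostname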

Prepare three machines

Installation

# Run one command per machine; make sure the ID matches the machine's IP
docker run -d --name zookeeper --net host --restart always -e ZOO_MY_ID=1 \
-e ZOO_SERVERS="server.1=192.168.3.17:2888:3888;2181 server.2=192.168.3.19:2888:3888;2181 server.3=192.168.3.20:2888:3888;2181" \
-v /root/zookeeper/data:/data -v /root/zookeeper/datalog:/datalog zookeeper:3.7

docker run -d --name zookeeper --net host --restart always -e ZOO_MY_ID=2 \
-e ZOO_SERVERS="server.1=192.168.3.17:2888:3888;2181 server.2=192.168.3.19:2888:3888;2181 server.3=192.168.3.20:2888:3888;2181" \
-v /root/zookeeper/data:/data -v /root/zookeeper/datalog:/datalog zookeeper:3.7

docker run -d --name zookeeper --net host --restart always -e ZOO_MY_ID=3 \
-e ZOO_SERVERS="server.1=192.168.3.17:2888:3888;2181 server.2=192.168.3.19:2888:3888;2181 server.3=192.168.3.20:2888:3888;2181" \
-v /root/zookeeper/data:/data -v /root/zookeeper/datalog:/datalog zookeeper:3.7

# Test
docker exec -it  zookeeper bash
# Connect to one node and create a znode
zkCli.sh -server 192.168.3.17
create /mynode hello
# Connect to another node and read it back
get /mynode
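
To confirm the ensemble actually formed, you can also check each node's role (a sketch; run on every machine):

# One node should report "Mode: leader", the other two "Mode: follower"
docker exec -it zookeeper zkServer.sh status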

Problem

Navicat reports the following error when connecting to MySQL 8.0.26:

Client does not support authentication protocol requested by server; consider upgrading MySQL client.

Solution

# Enter the MySQL container
docker exec -it mysql bash
# Log in
mysql -u root -p
use mysql;
-- Check the authentication plugin: MySQL 8.0 defaults to caching_sha2_password, which needs to be changed to mysql_native_password
select user,host,plugin from user where user='root';
ALTER USER 'root'@'localhost' IDENTIFIED BY '123456' PASSWORD EXPIRE NEVER;
ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY '123456';
ALTER USER 'root'@'%' IDENTIFIED BY '123456' PASSWORD EXPIRE NEVER;
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY '123456';
flush privileges;
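
To verify the change took effect, re-run the plugin query; both root entries should now show mysql_native_password:

-- Re-check the authentication plugin after the ALTER USER statements
select user,host,plugin from user where user='root';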

Configuration file: sentinel.conf

port 26379
sentinel monitor mymaster 192.168.41.128 6379 2
sentinel down-after-milliseconds mymaster 30000
sentinel parallel-syncs mymaster 1
sentinel failover-timeout mymaster 10000
sentinel deny-scripts-reconfig yes
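
The three sentinel containers below each mount their own copy of this file; Sentinel rewrites its config file at runtime, so the instances should not share a single file. A minimal sketch for preparing the copies:

# Create one config file per sentinel from the template above
cp sentinel.conf sentinel1.conf
cp sentinel.conf sentinel2.conf
cp sentinel.conf sentinel3.conf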

Start 1 master and 2 replicas

# Start the master node
docker run -d --net host --name redis-server redis:6.2.5 redis-server --port 6379
# Start the replica nodes
docker run -d --net host --name redis-slave1 redis:6.2.5 redis-server --slaveof 192.168.41.128 6379  --port 6378
docker run -d --net host --name redis-slave2 redis:6.2.5 redis-server --slaveof 192.168.41.128 6379  --port 6377
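
Before starting the sentinels, it is worth confirming that the master actually sees both replicas; a quick check:

# "connected_slaves:2" should appear in the output
docker exec -it redis-server redis-cli -p 6379 info replication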

Start the sentinels

docker run -d -p 26379:26379 --name redis-sentinel1 -v $PWD/sentinel1.conf:/sentinel/sentinel.conf redis:6.2.5 redis-sentinel /sentinel/sentinel.conf
docker run -d -p 26378:26379 --name redis-sentinel2 -v $PWD/sentinel2.conf:/sentinel/sentinel.conf redis:6.2.5 redis-sentinel /sentinel/sentinel.conf
docker run -d -p 26377:26379 --name redis-sentinel3 -v $PWD/sentinel3.conf:/sentinel/sentinel.conf redis:6.2.5 redis-sentinel /sentinel/sentinel.conf

Check the status

[root@node1 redis]# docker exec -it redis-sentinel1 bash
root@be853924137f:/data# redis-cli -p 26379
127.0.0.1:26379> info sentinel
# Sentinel
sentinel_masters:1
sentinel_tilt:0
sentinel_running_scripts:0
sentinel_scripts_queue_length:0
sentinel_simulate_failure_flags:0
master0:name=mymaster,status=sdown,address=192.168.41.128:6379,slaves=1,sentinels=3

# List the masters
127.0.0.1:26379> SENTINEL masters

# List the slaves
127.0.0.1:26379> SENTINEL slaves mymaster

# Stop redis-server
docker stop redis-server
# Check again whether the master node has switched
docker exec -it  redis-sentinel1 redis-cli -p 26379 sentinel masters
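
You can also ask a sentinel directly which address it currently considers the master; after the failover it should report one of the replicas (6378 or 6377):

# Prints the IP and port of the current master
docker exec -it redis-sentinel1 redis-cli -p 26379 sentinel get-master-addr-by-name mymaster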

Startup parameters

  • --requirepass 123456: sets the password that clients must provide when connecting to Redis
  • --masterauth 123456: sets the password that a replica uses to authenticate to its master (see the sketch below)
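
For example, a password-protected master/replica pair could be started like this (a sketch reusing the image and addresses from this article):

# The master requires clients to authenticate with --requirepass
docker run -d --net host --name redis-server redis:6.2.5 redis-server --port 6379 --requirepass 123456
# The replica authenticates to the master with --masterauth and also protects its own clients with --requirepass
docker run -d --net host --name redis-slave1 redis:6.2.5 redis-server --port 6378 --slaveof 192.168.41.128 6379 --masterauth 123456 --requirepass 123456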

One master, one replica

# Start the master node
docker run -d -p 6379:6379 --name redis-server redis:6.2.5 redis-server

# Start a replica node (multiple replicas can be started)
docker run -d -p 6378:6379 --name redis-slave redis:6.2.5 redis-server --slaveof 192.168.41.128 6379

# Check the replication status
docker exec -it redis-server bash
root@2057712d5b24:/data# redis-cli 
127.0.0.1:6379> info replication
# Replication
role:master
connected_slaves:1
slave0:ip=172.17.0.1,port=6379,state=online,offset=844,lag=0
master_failover_state:no-failover
master_replid:a7c3448b15070b13848c48139b99016a67cb5b9e
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:844
second_repl_offset:-1
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:1
repl_backlog_histlen:844


# Test
# A value set on the master can be read (get) on the replica
# The replica only supports reads; writes are rejected
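
A minimal sketch of that test, assuming the master/replica pair above (the key name is illustrative):

# Write on the master
docker exec -it redis-server redis-cli set mykey hello
# Read the same key back on the replica
docker exec -it redis-slave redis-cli get mykey
# A write on the replica is rejected: "READONLY You can't write against a read only replica."
docker exec -it redis-slave redis-cli set mykey world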