
Overview

One master and two slaves, with a single NameNode.

Note: the installation directory and configuration must be identical on all three machines. You can configure one machine first and then copy everything to the others.

Set up all three machines

# install the Java environment
yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel

vim /etc/profile

export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.242.b08-0.el7_7.x86_64
# Hadoop extraction directory
export HADOOP_HOME=/root/hbase/hadoop-2.6.0
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
# apply the changes
source /etc/profile
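
A quick sanity check that the environment took effect (a minimal sketch, assuming the JDK and Hadoop paths above match your machines):

# should print the 1.8.0 JDK version
java -version
# should print Hadoop 2.6.0
hadoop version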

Download the binary packages

# ZooKeeper cluster installation is covered in the Docker-based section below
wget http://archive.apache.org/dist/zookeeper/zookeeper-3.4.5/zookeeper-3.4.5.tar.gz
 
wget http://archive.apache.org/dist/hadoop/common/hadoop-2.6.0/hadoop-2.6.0.tar.gz
 
wget http://archive.apache.org/dist/hbase/1.2.0/hbase-1.2.0-bin.tar.gz
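
The tarballs then need to be unpacked; a minimal sketch, assuming everything lives under /root/hbase to match the HADOOP_HOME set above:

mkdir -p /root/hbase
tar -xzf hadoop-2.6.0.tar.gz -C /root/hbase
tar -xzf hbase-1.2.0-bin.tar.gz -C /root/hbase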

Add the hadoop user

useradd hadoop
passwd hadoop

vim /etc/sudoers

root	ALL=(ALL) 	ALL
hadoop	ALL=(ALL) 	ALL

Passwordless SSH setup is described in the hosts/SSH section below.

Create the name, data, and tmp directories

# must match the paths configured in core-site.xml and hdfs-site.xml below
mkdir -p /root/hbase/dfs/name
mkdir -p /root/hbase/dfs/data
mkdir -p /root/hbase/tmp

Key configuration

All configuration files live under hadoop-2.6.0/etc/hadoop/:

<!-- hadoop-env.sh and yarn-env.sh: set JAVA_HOME -->

<!-- core-site.xml  -->
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://data1:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/root/hbase/tmp</value>
    </property>
</configuration>

<!-- hdfs-site.xml -->
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>/root/hbase/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>/root/hbase/dfs/data</value>
    </property>
</configuration>

<!-- mapred-site.xml; mapred.job.tracker is a legacy MR1 property, on a YARN cluster the usual setting is mapreduce.framework.name=yarn -->
<configuration>
   <property>
      <name>mapred.job.tracker</name>
      <value>data1:9001</value>
   </property>
</configuration>

<!-- yarn-site.xml -->

<configuration>
     <property>
         <name>yarn.nodemanager.aux-services</name>
         <value>mapreduce_shuffle</value>
     </property>
     <property>
         <name>yarn.resourcemanager.address</name>
         <value>data1:8032</value>
     </property>
     <property>
         <name>yarn.resourcemanager.scheduler.address</name>
         <value>data1:8030</value>
     </property>
     <property>
         <name>yarn.resourcemanager.resource-tracker.address</name>
         <value>data1:8031</value>
     </property>
     <property>
         <name>yarn.resourcemanager.admin.address</name>
         <value>data1:8033</value>
     </property>
     <property>
         <name>yarn.resourcemanager.webapp.address</name>
         <value>data1:8088</value>
     </property>
</configuration>

<!-- slaves: remove the default localhost entry -->
[root@data1 hadoop]# cat slaves 
data2
data3
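
Since all three machines must be configured identically, the finished directory can simply be copied out; a sketch assuming passwordless SSH to data2/data3 is already in place:

# copy the whole installation, including configuration, to both slaves
scp -r /root/hbase data2:/root/
scp -r /root/hbase data3:/root/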

Start

# format the NameNode on the master (hdfs namenode -format replaces the deprecated hadoop namenode -format)
hdfs namenode -format
# start everything
[root@data1 hbase]# start-all.sh 
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [data1]
data1: starting namenode, logging to /root/hbase/hadoop-2.6.0/logs/hadoop-root-namenode-data1.out
data3: starting datanode, logging to /root/hbase/hadoop-2.6.0/logs/hadoop-root-datanode-data3.out
data2: starting datanode, logging to /root/hbase/hadoop-2.6.0/logs/hadoop-root-datanode-data2.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /root/hbase/hadoop-2.6.0/logs/hadoop-root-secondarynamenode-data1.out
starting yarn daemons
starting resourcemanager, logging to /root/hbase/hadoop-2.6.0/logs/yarn-root-resourcemanager-data1.out
data3: starting nodemanager, logging to /root/hbase/hadoop-2.6.0/logs/yarn-root-nodemanager-data3.out
data2: starting nodemanager, logging to /root/hbase/hadoop-2.6.0/logs/yarn-root-nodemanager-data2.out

# check the daemon status on each node
[root@data1 hbase]# jps
24048 ResourceManager
24307 Jps
23893 SecondaryNameNode
23711 NameNode

[root@data2 tmp]# jps
12341 DataNode
12442 NodeManager
12570 Jps

[root@data3 tmp]# jps
5187 DataNode
5288 NodeManager
5416 Jps
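
Beyond jps, the web UIs are a quick check; 50070 is the Hadoop 2.x NameNode default, and 8088 is the yarn.resourcemanager.webapp.address configured above (hostnames assume the hosts file described later):

# NameNode UI: should list two live DataNodes
curl -s http://data1:50070/ >/dev/null && echo "NameNode UI up"
# ResourceManager UI
curl -s http://data1:8088/ >/dev/null && echo "ResourceManager UI up"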

Error 1

Two things came up at startup. The prompt below is just the first-time SSH host-key check for 0.0.0.0 (the secondary NameNode address) and can be answered with yes. The more serious failure is when DataNodes refuse to start because the clusterID in the underlying VERSION files no longer matches, typically after reformatting; in that case delete the name, data, and tmp directories on every node and reformat.

Starting secondary namenodes [0.0.0.0]
The authenticity of host '0.0.0.0 (0.0.0.0)' can't be established.
ECDSA key fingerprint is SHA256:/bqliO4L8XYMMr/5wVDufH9IjldwXwLWEol3eAEjuzc.
ECDSA key fingerprint is MD5:92:8e:24:a9:a1:e8:a9:55:8d:20:0f:4e:3d:34:dd:f0.
Are you sure you want to continue connecting (yes/no)? yes
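
For the clusterID mismatch, a sketch of the recovery steps under the directory layout used above (the cleanup runs on every node, the format only on the master):

# on every node: remove the stale metadata (this deletes all HDFS data!)
rm -rf /root/hbase/dfs/name/* /root/hbase/dfs/data/* /root/hbase/tmp/*
# on the master only: reformat and restart
hdfs namenode -format
start-all.sh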

Test

hadoop fs -mkdir -p /test          # create a directory in HDFS
hadoop fs -ls /test                # list it
hadoop fs -put test.txt /test/     # upload a local file
hadoop fs -cat /test/test.txt      # read it back
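
To confirm the blocks actually landed on the DataNodes, hdfs dfsadmin gives a cluster-wide report; in this setup it should show two live DataNodes:

hdfs dfsadmin -report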

Kafka cluster (Docker)

# node1
docker run -d --name kafka --net host --restart always \
  -e KAFKA_CFG_ZOOKEEPER_CONNECT=192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181 \
  -e KAFKA_BROKER_ID=1 \
  -e KAFKA_CFG_LISTENERS=PLAINTEXT://192.168.3.17:9092 \
  -e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://192.168.3.17:9092 \
  -e ALLOW_PLAINTEXT_LISTENER=yes \
  -e KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true \
  -v /root/kafka:/bitnami/kafka \
  bitnami/kafka:2.8.0

# node2
docker run -d --name kafka --net host --restart always \
  -e KAFKA_CFG_ZOOKEEPER_CONNECT=192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181 \
  -e KAFKA_BROKER_ID=2 \
  -e KAFKA_CFG_LISTENERS=PLAINTEXT://192.168.3.19:9092 \
  -e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://192.168.3.19:9092 \
  -e ALLOW_PLAINTEXT_LISTENER=yes \
  -e KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true \
  -v /root/kafka:/bitnami/kafka \
  bitnami/kafka:2.8.0

# node3
docker run -d --name kafka --net host --restart always \
  -e KAFKA_CFG_ZOOKEEPER_CONNECT=192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181 \
  -e KAFKA_BROKER_ID=3 \
  -e KAFKA_CFG_LISTENERS=PLAINTEXT://192.168.3.20:9092 \
  -e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://192.168.3.20:9092 \
  -e ALLOW_PLAINTEXT_LISTENER=yes \
  -e KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true \
  -v /root/kafka:/bitnami/kafka \
  bitnami/kafka:2.8.0
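
To verify that all three brokers registered, you can query ZooKeeper for the broker IDs; a sketch assuming the zookeeper container from the ZooKeeper section below is running on the same hosts:

# should print [1, 2, 3]
docker exec -it zookeeper zkCli.sh -server 192.168.3.17:2181 ls /brokers/ids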

# two UI options
docker run -d -p 18080:8889 --name ui freakchicken/kafka-ui-lite
docker run -d -p 9000:9000 --name ui -e ZK_HOSTS=192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181 solsson/kafka-manager

# test commands
# create a topic
./kafka-topics.sh --create --zookeeper 192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181 --replication-factor 1 --partitions 1 --topic first_kafka

# produce messages from the console
./kafka-console-producer.sh --broker-list 192.168.3.17:9092,192.168.3.19:9092,192.168.3.20:9092 --topic first_kafka

# consume messages from the beginning
./kafka-console-consumer.sh --bootstrap-server 192.168.3.17:9092,192.168.3.19:9092,192.168.3.20:9092 --topic first_kafka --from-beginning

# describe, list, and delete the topic
./kafka-topics.sh --describe --zookeeper 192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181 --topic first_kafka

./kafka-topics.sh --list --zookeeper 192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181

./kafka-topics.sh --delete --zookeeper 192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181 --topic first_kafka

Hosts and passwordless SSH

vim /etc/hosts

Configure the hosts file the same way on every machine; for example:
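
A sketch of such a hosts file, assuming the three machines are the ones from the Kafka/ZooKeeper sections (the name-to-IP mapping here is illustrative; use your own hostnames, e.g. data1-data3 for the Hadoop cluster):

192.168.3.17 node1
192.168.3.19 node2
192.168.3.20 node3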

ssh-keygen -t rsa

Run this on every machine; just press Enter through all the prompts.

ssh-copy-id -i nodeName

Run this on every machine for each node, entering the corresponding machine's password.

ssh-copy-id -i node1
ssh-copy-id -i node2
ssh-copy-id -i node3
...
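
A quick check that passwordless login works across the cluster, assuming the node names above:

# each command should print the remote hostname without asking for a password
for n in node1 node2 node3; do ssh $n hostname; done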

ZooKeeper cluster (Docker)

Prepare three machines

Install

# run one of these on each machine; ZOO_MY_ID must match that machine's server.N entry in ZOO_SERVERS
docker run -d --name zookeeper --net host --restart always -e ZOO_MY_ID=1 \
-e ZOO_SERVERS="server.1=192.168.3.17:2888:3888;2181 server.2=192.168.3.19:2888:3888;2181 server.3=192.168.3.20:2888:3888;2181" \
-v /root/zookeeper/data:/data -v /root/zookeeper/datalog:/datalog zookeeper:3.7

docker run -d --name zookeeper --net host --restart always -e ZOO_MY_ID=2 \
-e ZOO_SERVERS="server.1=192.168.3.17:2888:3888;2181 server.2=192.168.3.19:2888:3888;2181 server.3=192.168.3.20:2888:3888;2181" \
-v /root/zookeeper/data:/data -v /root/zookeeper/datalog:/datalog zookeeper:3.7

docker run -d --name zookeeper --net host --restart always -e ZOO_MY_ID=3 \
-e ZOO_SERVERS="server.1=192.168.3.17:2888:3888;2181 server.2=192.168.3.19:2888:3888;2181 server.3=192.168.3.20:2888:3888;2181" \
-v /root/zookeeper/data:/data -v /root/zookeeper/datalog:/datalog zookeeper:3.7

# test
docker exec -it zookeeper bash
# connect to one node and create a znode
zkCli.sh -server 192.168.3.17
create /mynode hello
# connect to another node and read it back
get /mynode
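
To check the ensemble roles, zkServer.sh status can be run inside each container; exactly one node should be the leader:

# run on each machine: expect Mode: leader on one node, Mode: follower on the others
docker exec -it zookeeper zkServer.sh status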

MySQL

Problem

Navicat reports the following error when connecting to MySQL 8.0.26:

Client does not support authentication protocol requested by server; consider upgrading MySQL client.

Solution

# enter the MySQL container
docker exec -it mysql bash
# log in
mysql -u root -p
use mysql;
-- check the authentication plugin: MySQL 8.0 defaults to caching_sha2_password,
-- which older clients don't support, so switch it to mysql_native_password
select user,host,plugin from user where user='root';
ALTER USER 'root'@'localhost' IDENTIFIED BY '123456' PASSWORD EXPIRE NEVER;
ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY '123456';
ALTER USER 'root'@'%' IDENTIFIED BY '123456' PASSWORD EXPIRE NEVER;
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY '123456';
flush privileges;
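
Re-running the plugin query from the host confirms the change; a sketch assuming the container name and the 123456 password set above:

docker exec -it mysql mysql -uroot -p123456 \
  -e "select user,host,plugin from mysql.user where user='root';"
# both root rows should now show mysql_native_password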

Redis Sentinel (Docker)

Configuration file sentinel.conf

port 26379
sentinel monitor mymaster 192.168.41.128 6379 2
sentinel down-after-milliseconds mymaster 30000
sentinel parallel-syncs mymaster 1
sentinel failover-timeout mymaster 10000
sentinel deny-scripts-reconfig yes
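
Each sentinel below mounts its own copy of this file (Sentinel rewrites its config file at runtime, so the three containers should not share one); a sketch that produces the sentinel1.conf-sentinel3.conf copies the docker run commands expect:

for i in 1 2 3; do cp sentinel.conf sentinel$i.conf; done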

Start one master and two replicas

# start the master
docker run -d --net host --name redis-server redis:6.2.5 redis-server --port 6379
# start the replicas
docker run -d --net host --name redis-slave1 redis:6.2.5 redis-server --slaveof 192.168.41.128 6379 --port 6378
docker run -d --net host --name redis-slave2 redis:6.2.5 redis-server --slaveof 192.168.41.128 6379 --port 6377

Start the sentinels

docker run -d -p 26379:26379 --name redis-sentinel1 -v $PWD/sentinel1.conf:/sentinel/sentinel.conf redis:6.2.5 redis-sentinel /sentinel/sentinel.conf
docker run -d -p 26378:26379 --name redis-sentinel2 -v $PWD/sentinel2.conf:/sentinel/sentinel.conf redis:6.2.5 redis-sentinel /sentinel/sentinel.conf
docker run -d -p 26377:26379 --name redis-sentinel3 -v $PWD/sentinel3.conf:/sentinel/sentinel.conf redis:6.2.5 redis-sentinel /sentinel/sentinel.conf

Check the status

[root@node1 redis]# docker exec -it redis-sentinel1 bash
root@be853924137f:/data# redis-cli -p 26379
127.0.0.1:26379> info sentinel
# Sentinel
sentinel_masters:1
sentinel_tilt:0
sentinel_running_scripts:0
sentinel_scripts_queue_length:0
sentinel_simulate_failure_flags:0
master0:name=mymaster,status=sdown,address=192.168.41.128:6379,slaves=1,sentinels=3

# list the masters
127.0.0.1:26379> SENTINEL masters

# list the slaves of mymaster
127.0.0.1:26379> SENTINEL slaves mymaster

# stop the master to trigger a failover
docker stop redis-server
# check again whether the master has changed
docker exec -it redis-sentinel1 redis-cli -p 26379 sentinel masters
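
Once the failover completes, one of the replicas should have been promoted; a sketch checking the first replica started above:

# role:master indicates this replica was promoted
docker exec -it redis-slave1 redis-cli -p 6378 info replication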