
Installation

# Check whether VNC is already installed on the server
rpm -qa | grep vnc
# Install it
yum install tigervnc tigervnc-server -y

Usage

# Start a desktop session
# By default it listens on port 5901
vncserver :1

# Start a second desktop session
# By default it listens on port 5902
vncserver :2
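
# Optional check: display :N listens on TCP port 5900+N (assumes the ss tool from iproute2 is available)
ss -ltnp | grep -i vnc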

# List the running desktop sessions
[root@hadoop3 ~]# vncserver -list

TigerVNC server sessions:

X DISPLAY #    PROCESS ID
:1             19243
:2             18722

# Kill a desktop session
vncserver -kill :2

# Set the VNC password for the current user
[root@ ~]# vncpasswd
Password:
Verify:

If the firewall is enabled, the relevant ports must be opened manually:

iptables -I INPUT -p tcp --dport 5901 -j ACCEPT
iptables -I INPUT -p tcp --dport 5902 -j ACCEPT
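
On CentOS 7 with firewalld rather than bare iptables, the equivalent would be:

firewall-cmd --permanent --add-port=5901-5902/tcp
firewall-cmd --reload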

Testing the VNC service

  1. Download VNC Viewer
  2. Start VNC Viewer
  3. Enter 192.168.3.11:1 in the Server field
  4. Enter the password
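
If the ports stay closed in the firewall, an SSH tunnel works as well; a minimal sketch that forwards local port 5901 to the server, after which VNC Viewer connects to localhost:1:

ssh -L 5901:localhost:5901 root@192.168.3.11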

Problems encountered

VNC conflicts with Anaconda


Could not make bus activated clients aware of XDG_CURRENT_DESKTOP=GNOME envi

Workaround:

  1. Comment out the Anaconda-related environment variables in ~/.bashrc
  2. Run source ~/.bashrc
  3. Open a new Xshell connection and start the VNC server
  4. Restore the Anaconda-related lines in ~/.bashrc
  5. Run source ~/.bashrc
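
A sketch that scripts steps 1 and 4, assuming every Anaconda-related line in ~/.bashrc contains the string "conda" (adjust the pattern to your file):

# Comment the Anaconda lines out, then log in again so the old variables are really gone
sed -i 's/^\(.*conda.*\)$/#\1/' ~/.bashrc
# ... start vncserver from the fresh shell, then restore the lines:
sed -i 's/^#\(.*conda.*\)$/\1/' ~/.bashrc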


Deploying MongoDB with Docker

# MONGO_INITDB_ROOT_USERNAME sets the root username
# MONGO_INITDB_ROOT_PASSWORD sets the root password
# --auth requires authentication to access the service
# -v /my/own/datadir:/data/db enables persistent storage
docker run -d --name mongo -p 27017:27017 -v /my/own/datadir:/data/db -e MONGO_INITDB_ROOT_USERNAME=admin -e MONGO_INITDB_ROOT_PASSWORD=123456 mongo --auth

Testing

docker exec -it mongo mongo admin
# Test logging in as the root user
> db.auth('admin','123456')
1

# Create a user named test with password 123456.
> db.createUser({ user:'test', pwd:'123456', roles:[ { role:'userAdminAnyDatabase', db:'admin' }, 'readWriteAnyDatabase' ] });
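
To verify the new account, log in with it directly (--authenticationDatabase points at the database where the user was created):

docker exec -it mongo mongo -u test -p 123456 --authenticationDatabase admin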

Podman

Overview

Podman is an alternative to Docker whose CLI is highly compatible with Docker's; the main operational difference is that Docker relies on a daemon while Podman does not.

Installing on CentOS

yum -y install podman

Basic usage

# Create a container (run it detached; the systemd steps below need the container to persist, so --rm is dropped)
podman run -d -p 80:80 --name nginx nginx:alpine

Starting the container at boot

Because Podman has no daemon, it cannot start containers at boot on its own; this has to be delegated to the host's init system.

# Generate a systemd unit for the container created above
podman generate systemd nginx > nginx.service
# Fix the file permissions (unit files need no execute bit)
chmod 644 nginx.service
# Move the unit into the systemd directory
mv nginx.service /usr/lib/systemd/system/
systemctl daemon-reload
# Manage autostart of the container through systemctl
systemctl enable/disable nginx
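
A quick check that the generated unit actually works:

systemctl start nginx
curl -I http://localhost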

Prerequisites

  • An HBase cluster is already running (the steps below deploy OpenTSDB on top of it)
  • GnuPlot 4.2+

Steps

  1. Install GnuPlot
yum install -y gnuplot
  2. Download opentsdb.rpm
  3. Install opentsdb
rpm -ivh opentsdb-2.4.0.noarch.rpm
  4. Edit opentsdb.conf
vim /etc/opentsdb/opentsdb.conf

tsd.network.port = 4242
tsd.core.auto_create_metrics = true
tsd.storage.hbase.zk_quorum = 192.168.3.17:2181,192.168.3.19:2181,192.168.3.20:2181
  5. Create the table schema in HBase
    create 'tsdb',{NAME => 't', VERSIONS => 1, BLOOMFILTER => 'ROW'}
    create 'tsdb-uid',{NAME => 'id', BLOOMFILTER => 'ROW'},{NAME => 'name', BLOOMFILTER => 'ROW'}
    create 'tsdb-tree',{NAME => 't', VERSIONS => 1, BLOOMFILTER => 'ROW'}
    create 'tsdb-meta',{NAME => 'name', BLOOMFILTER => 'ROW'}

The supported COMPRESSION types are NONE, LZO, GZIP, and SNAPPY. For example, with SNAPPY:

create 'tsdb',{NAME => 't', VERSIONS => 1, BLOOMFILTER => 'ROW', COMPRESSION => 'SNAPPY'}
create 'tsdb-uid',{NAME => 'id', BLOOMFILTER => 'ROW', COMPRESSION => 'SNAPPY'},{NAME => 'name', BLOOMFILTER => 'ROW', COMPRESSION => 'SNAPPY'}
create 'tsdb-tree',{NAME => 't', VERSIONS => 1, BLOOMFILTER => 'ROW', COMPRESSION => 'SNAPPY'}
create 'tsdb-meta',{NAME => 'name', BLOOMFILTER => 'ROW', COMPRESSION => 'SNAPPY'}
  6. Create an opentsdb service
[root@data1 opentsdb]# cat opentsdb.service
[Unit]
Description=OpenTSDB Service
[Service]
Type=forking
PrivateTmp=yes
ExecStart=/usr/share/opentsdb/etc/init.d/opentsdb start
ExecStop=/usr/share/opentsdb/etc/init.d/opentsdb stop
Restart=on-abort
[Install]
WantedBy=multi-user.target

# Install and enable the service unit (unit files need no execute bit)
chmod 644 opentsdb.service
cp opentsdb.service /etc/systemd/system/
systemctl enable opentsdb.service
  7. Start it
    systemctl status/start/stop/restart opentsdb
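
A quick smoke test over the HTTP API (assumes the TSD listens on localhost:4242; sys.test.metric is a made-up metric name, auto-created thanks to tsd.core.auto_create_metrics above):

curl -s http://localhost:4242/api/version
curl -s -X POST http://localhost:4242/api/put -H 'Content-Type: application/json' \
  -d "{\"metric\":\"sys.test.metric\",\"timestamp\":$(date +%s),\"value\":1,\"tags\":{\"host\":\"data1\"}}"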

Logs

/var/log/opentsdb

Problems encountered

2020-10-13 12:21:31,740 WARN  [AsyncHBase I/O Worker #1] HBaseClient: Probe Exists(table="tsdb-uid", key=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 58, 65, 115, 121, 110, 99, 72, 66, 97, 115, 101, 126, 112, 114, 111, 98, 101, 126, 60, 59, 95, 60, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 58, 65, 115, 121, 110, 99, 72, 66, 97, 115, 101, 126, 112, 114, 111, 98, 101, 126, 60, 59, 95, 60], family=null, qualifiers=null, attempt=0, region=RegionInfo(table="tsdb-uid", region_name="tsdb-uid,,1542978859652.665eaacf411c9f82e13e35a62cfff831.", stop_key="")) failed
org.hbase.async.NonRecoverableException: Too many attempts: Exists(table="tsdb-uid", key=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 58, 65, 115, 121, 110, 99, 72, 66, 97, 115, 101, 126, 112, 114, 111, 98, 101, 126, 60, 59, 95, 60, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 58, 65, 115, 121, 110, 99, 72, 66, 97, 115, 101, 126, 112, 114, 111, 98, 101, 126, 60, 59, 95, 60], family=null, qualifiers=null, attempt=11, region=RegionInfo(table="tsdb-uid", region_name="tsdb-uid,,1542978859652.665eaacf411c9f82e13e35a62cfff831.", stop_key=""))
    at org.hbase.async.HBaseClient.tooManyAttempts(HBaseClient.java:2056) [asynchbase-1.7.2.jar:na]
    at org.hbase.async.HBaseClient.sendRpcToRegion(HBaseClient.java:1920) [asynchbase-1.7.2.jar:na]
    at org.hbase.async.HBaseClient$1RetryRpc.call(HBaseClient.java:1944) [asynchbase-1.7.2.jar:na]

The likely cause is that HBase does not support the chosen compression algorithm, so the tables were created incorrectly.
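
HBase ships a CompressionTest utility for checking whether a codec actually works on a node; a minimal check for SNAPPY (the file path is arbitrary):

hbase org.apache.hadoop.hbase.util.CompressionTest file:///tmp/compression-test snappy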

Prerequisites

  • A Java environment is configured
  • A ZooKeeper cluster is running
  • A Hadoop cluster is running
  • Three machines are available

Steps

  1. Copy Hadoop's hdfs-site.xml and core-site.xml into hbase/conf.

    cp hdfs-site.xml /root/hbase/hbase-1.2.0/conf/
    cp core-site.xml  /root/hbase/hbase-1.2.0/conf/
  2. Configure hbase-env.sh

    export JAVA_HOME=/usr/local/java/jdk1.8.0_171
    # Tell HBase to use the external ZooKeeper
    export HBASE_MANAGES_ZK=false
  3. Configure hbase-site.xml

    <configuration>
     <property>
         <name>hbase.rootdir</name>
         <!-- the hdfs://data1:9000 part must match fs.defaultFS in core-site.xml -->
         <value>hdfs://data1:9000/hbase</value>
     </property>
     <property>
         <name>hbase.cluster.distributed</name>
         <value>true</value>
     </property>
     <property>
         <name>hbase.zookeeper.quorum</name>
         <value>data1:2181,data2:2181,data3:2181</value>
     </property>
    </configuration>
  4. Configure regionservers

    [root@data1 conf]# cat regionservers 
    data2
    data3
  5. Configure backup-masters

    echo data2 > backup-masters
  6. Copy the HBase build and configuration to the other machines

scp -r hbase-1.2.0 root@data2:/root/hbase/
scp -r hbase-1.2.0 root@data3:/root/hbase/
  7. Start HBase

    # Run this on the master node only
    bin/start-hbase.sh
  8. Configure the environment variables

    vim /etc/profile

    export HBASE_HOME=/root/hbase/hbase-1.2.0
    export PATH=$PATH:$HBASE_HOME/bin

    source /etc/profile
  9. Enter the HBase shell

    hbase shell

Testing

  1. UI

http://master:16010/master-status

  2. Shell

    hbase(main):001:0> create 'test','cf'
    0 row(s) in 1.3920 seconds

    => Hbase::Table - test
    hbase(main):002:0> list
    TABLE
    test
    1 row(s) in 0.0170 seconds

    => ["test"]
    hbase(main):003:0> put 'test', 'row1', 'cf:a', 'value1'
    0 row(s) in 0.1200 seconds

    hbase(main):004:0> scan 'test'
    ROW          COLUMN+CELL
    row1         column=cf:a, timestamp=1629967515721, value=value1
    1 row(s) in 0.0210 seconds

    hbase(main):005:0>



Overview

One master and two slaves, with a single NameNode.

Note: the installation directory and configuration must be identical on all three machines; configure one machine first, then copy everything to the others.

Prepare the three machines

# Install the Java environment
yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel

vim /etc/profile

export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.242.b08-0.el7_7.x86_64
# Hadoop extraction directory
export HADOOP_HOME=/root/hbase/hadoop-2.6.0
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
# Apply the changes
source /etc/profile

Download the binary packages

# ZooKeeper cluster installation is omitted here; see the Docker-based setup instead
wget http://archive.apache.org/dist/zookeeper/zookeeper-3.4.5/zookeeper-3.4.5.tar.gz

wget http://archive.apache.org/dist/hadoop/common/hadoop-2.6.0/hadoop-2.6.0.tar.gz

wget http://archive.apache.org/dist/hbase/1.2.0/hbase-1.2.0-bin.tar.gz

Add a hadoop user

useradd hadoop
passwd hadoop

vim /etc/sudoers

root    ALL=(ALL)     ALL
hadoop    ALL=(ALL)     ALL

Passwordless SSH setup is omitted here; a minimal sketch follows.
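
A minimal sketch of that setup, run as root on data1 (hostnames as used throughout this guide):

ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
for host in data1 data2 data3; do ssh-copy-id root@$host; done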

Create the name, data, and tmp directories

# Run inside /root/hbase so the paths match the dfs.* settings below
mkdir -p dfs/name
mkdir -p dfs/data
mkdir tmp

Key configuration

All of the configuration files live under hadoop-2.6.0/etc/hadoop/:

<!-- Set JAVA_HOME in hadoop-env.sh and yarn-env.sh -->

<!-- core-site.xml  -->
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://data1:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/root/hbase/tmp</value>
    </property>
</configuration>

<!-- hdfs-site.xml -->
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>/root/hbase/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>/root/hbase/dfs/data</value>
    </property>
</configuration>

<!-- mapred-site.xml -->
<configuration>
   <property>
      <name>mapred.job.tracker</name>
      <value>data1:9001</value>
   </property>
</configuration>

<!-- yarn-site.xml -->

<configuration>
     <property>
         <name>yarn.nodemanager.aux-services</name>
         <value>mapreduce_shuffle</value>
     </property>
     <property>
         <name>yarn.resourcemanager.address</name>
         <value>data1:8032</value>
     </property>
     <property>
         <name>yarn.resourcemanager.scheduler.address</name>
         <value>data1:8030</value>
     </property>
     <property>
         <name>yarn.resourcemanager.resource-tracker.address</name>
         <value>data1:8031</value>
     </property>
     <property>
         <name>yarn.resourcemanager.admin.address</name>
         <value>data1:8033</value>
     </property>
     <property>
         <name>yarn.resourcemanager.webapp.address</name>
         <value>data1:8088</value>
     </property>
</configuration>

<!-- slaves: remove localhost -->
[root@data1 hadoop]# cat slaves 
data2
data3
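
As noted above, configure one machine first and then copy the whole directory to the others (paths as used in this guide):

scp -r /root/hbase/hadoop-2.6.0 root@data2:/root/hbase/
scp -r /root/hbase/hadoop-2.6.0 root@data3:/root/hbase/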

Startup

# Format the NameNode on the master
hadoop namenode -format
# Start everything
[root@data1 hbase]# start-all.sh 
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [data1]
data1: starting namenode, logging to /root/hbase/hadoop-2.6.0/logs/hadoop-root-namenode-data1.out
data3: starting datanode, logging to /root/hbase/hadoop-2.6.0/logs/hadoop-root-datanode-data3.out
data2: starting datanode, logging to /root/hbase/hadoop-2.6.0/logs/hadoop-root-datanode-data2.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /root/hbase/hadoop-2.6.0/logs/hadoop-root-secondarynamenode-data1.out
starting yarn daemons
starting resourcemanager, logging to /root/hbase/hadoop-2.6.0/logs/yarn-root-resourcemanager-data1.out
data3: starting nodemanager, logging to /root/hbase/hadoop-2.6.0/logs/yarn-root-nodemanager-data3.out
data2: starting nodemanager, logging to /root/hbase/hadoop-2.6.0/logs/yarn-root-nodemanager-data2.out

# Check the processes on each node
[root@data1 hbase]# jps
24048 ResourceManager
24307 Jps
23893 SecondaryNameNode
23711 NameNode

[root@data2 tmp]# jps
12341 DataNode
12442 NodeManager
12570 Jps

[root@data3 tmp]# jps
5187 DataNode
5288 NodeManager
5416 Jps

Error 1

The following error came up at startup; the main cause is that the clusterID recorded in the underlying VERSION files did not match. Delete the name, data, and tmp directories and re-format.

Starting secondary namenodes [0.0.0.0]
The authenticity of host '0.0.0.0 (0.0.0.0)' can't be established.
ECDSA key fingerprint is SHA256:/bqliO4L8XYMMr/5wVDufH9IjldwXwLWEol3eAEjuzc.
ECDSA key fingerprint is MD5:92:8e:24:a9:a1:e8:a9:55:8d:20:0f:4e:3d:34:dd:f0.
Are you sure you want to continue connecting (yes/no)? yes
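
The re-format fix itself, per the note above (clear these directories on every node, then format again on the master; paths from this guide):

rm -rf /root/hbase/dfs/name/* /root/hbase/dfs/data/* /root/hbase/tmp/*
hadoop namenode -format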

Testing

hadoop fs -mkdir -p /test
hadoop fs -ls /test
hadoop fs -put test.txt /test/
hadoop fs -cat /test/test.txt
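
# Optional: the dfsadmin report should show two live DataNodes
hdfs dfsadmin -report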