nfs-client

Install NFS

# Install the packages and enable them to start on boot
yum install nfs-utils
systemctl enable rpcbind
systemctl enable nfs
# Start the services
systemctl start rpcbind
systemctl start nfs
# Once the services are up, create a shared directory on the server
mkdir /data
chmod 755 /data
# Configure the export for this directory
vi /etc/exports
# Add the following entry
/data/     192.168.0.0/24(rw,sync,no_root_squash,no_all_squash)
# After saving, restart the NFS service
systemctl restart nfs
# Check the local export list
showmount -e localhost
Export list for localhost:
/data 192.168.0.0/24
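
On a client machine you can verify the export by mounting it (a quick sanity check; 192.168.0.10 stands in for your NFS server's address):

# Install the client tools, inspect the server's exports, and mount the share
yum install nfs-utils
showmount -e 192.168.0.10
mount -t nfs 192.168.0.10:/data /mnt
# Write a test file, then unmount
touch /mnt/test-file
umount /mnt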

Install nfs-client with Helm

# Add the repo
helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
# Update the repo index
helm repo update
# Install
helm install my-nfs nfs-subdir-external-provisioner/nfs-subdir-external-provisioner \
    --set nfs.server=x.x.x.x \
    --set nfs.path=/data

# Set the image repository; the default registry may not be reachable
--set image.repository=hub.deri.org.cn/k8s/nfs-subdir-external-provisioner
# Set the StorageClass name (default: nfs-client)
--set storageClass.name=my-nfs
# Set the provisioner name (by default one is generated automatically)
--set storageClass.provisionerName=cluster.local/nfsxxx
# Full install command
helm install nfs-client nfs-subdir-external-provisioner/nfs-subdir-external-provisioner --set nfs.server=192.168.3.22 --set nfs.path=/data/nfs --set image.repository=hub.deri.org.cn/k8s/nfs-subdir-external-provisioner --set storageClass.name=nfs-client --set storageClass.provisionerName=cluster.local/nfs-client
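
After installation, verify that dynamic provisioning works by creating a test PVC (a minimal sketch; the claim name test-claim is arbitrary):

# The StorageClass should be listed
kubectl get storageclass
# Create a small test claim against it
kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
EOF
# The claim should go to Bound, and a matching directory should appear under /data/nfs on the NFS server
kubectl get pvc test-claim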

etcd

etcd is demanding on disk performance; the official recommendation is SSDs at a minimum.
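
You can check whether a disk is fast enough with fio, following the upstream guidance (a sketch; test-data is a scratch directory on the disk under test):

# Sequential 2300-byte writes with an fdatasync after each one, mimicking etcd's WAL pattern
fio --rw=write --ioengine=sync --fdatasync=1 --directory=test-data --size=22m --bs=2300 --name=etcd-disk-test
# In the output, the 99th percentile of fdatasync latency should stay below 10ms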

ENDPOINTS=https://192.168.3.28:2379,https://192.168.3.29:2379,https://192.168.3.30:2379

# Check cluster status
/usr/local/bin/etcdctl --cacert=/etc/ssl/etcd/ssl/ca.pem --cert=/etc/ssl/etcd/ssl/member-05.pem --key=/etc/ssl/etcd/ssl/member-05-key.pem --endpoints=$ENDPOINTS endpoint status

# Check endpoint health
/usr/local/bin/etcdctl --cacert=/etc/ssl/etcd/ssl/ca.pem --cert=/etc/ssl/etcd/ssl/member-05.pem --key=/etc/ssl/etcd/ssl/member-05-key.pem --endpoints=$ENDPOINTS endpoint health

# Dump all monitoring metrics
curl --cacert /etc/ssl/etcd/ssl/ca.pem --cert /etc/ssl/etcd/ssl/member-05.pem --key /etc/ssl/etcd/ssl/member-05-key.pem https://10.201.112.28:2379/metrics
# Filter for the disk-related ones
curl --cacert /etc/ssl/etcd/ssl/ca.pem --cert /etc/ssl/etcd/ssl/member-05.pem --key /etc/ssl/etcd/ssl/member-05-key.pem https://10.201.112.28:2379/metrics | grep disk_backend_commit_duration_seconds

Overview

BookStack is an open-source alternative to Confluence: simple to deploy and easy to use.

Docker deployment

Deployment steps

  • Prepare a MySQL instance and create a database named bookstack.
  • Start the container with docker:
# Note APP_URL: this is the address you will use to reach BookStack; if you access it via a domain name, use that domain, e.g. https://book.test.com
docker run -d --name bookstack  -e PUID=1000 -e PGID=1000 -e DB_HOST=192.168.3.27  -e DB_PORT=3306 -e DB_USER=root -e DB_PASS=123456 -e DB_DATABASE=bookstack -p 6875:80 -e APP_URL=http://192.168.3.27:6875 -e TZ=Asia/Shanghai  linuxserver/bookstack
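
Once the container is up, BookStack should answer on the mapped port. The default admin login on a fresh install is admin@admin.com / password; change it right away.

# Expect an HTTP response once startup has finished
curl -I http://192.168.3.27:6875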

LDAP integration

docker run -d --name bookstack  -e PUID=1000 -e PGID=1000 -e DB_HOST=192.168.3.27  -e DB_PORT=3306 -e DB_USER=root -e DB_PASS=123456 -e DB_DATABASE=bookstack -p 6875:80 -e APP_URL=http://192.168.3.27:6875 -e TZ=Asia/Shanghai -e AUTH_METHOD=ldap -e LDAP_SERVER=192.168.0.9:389 -e LDAP_BASE_DN="ou=xxxxx,dc=xxxx,dc=xxxx" -e LDAP_DN="cn=xxx,dc=xxx,dc=xxxx" -e LDAP_VERSION=3 -e LDAP_PASS="xxxxxxxxxxxxxxxxxx"  linuxserver/bookstack

You can also edit the .env file directly and mount it into the container at /config/www/.env. The .env file contains other default settings, so take care not to lose them.

Persistence

-v /data/bookstack/app:/app -v /data/bookstack/config:/config
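
Putting it together with the volumes above, a persistent deployment might look like this (the host paths under /data/bookstack are examples):

docker run -d --name bookstack -e PUID=1000 -e PGID=1000 -e DB_HOST=192.168.3.27 -e DB_PORT=3306 -e DB_USER=root -e DB_PASS=123456 -e DB_DATABASE=bookstack -e APP_URL=http://192.168.3.27:6875 -e TZ=Asia/Shanghai -p 6875:80 -v /data/bookstack/app:/app -v /data/bookstack/config:/config linuxserver/bookstack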

The original table

Consider a menu table in which a menu can have sub-menus. The requirement: given a menu ID, find all sub-menus beneath it.

CREATE TABLE `t_menu` (
  `menu_id` int unsigned NOT NULL AUTO_INCREMENT COMMENT 'menu ID',
  `menu_name` varchar(128) NOT NULL COMMENT 'menu name',
  `menu_url` varchar(128) DEFAULT NULL COMMENT 'menu URL',
  `menu_level` int DEFAULT '-1' COMMENT 'menu level',
  `menu_type` int NOT NULL DEFAULT '0' COMMENT 'menu type: 0 = menu, 1 = button',
  `menu_parent_id` int unsigned DEFAULT NULL COMMENT 'parent menu ID',
  `menu_icon` varchar(255) DEFAULT NULL COMMENT 'icon',
  `menu_order` int DEFAULT '0' COMMENT 'display order',
  PRIMARY KEY (`menu_id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4;
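
For a concrete example, seed a few rows (hypothetical data; menu_db stands in for whatever database holds the table):

mysql -uroot -p menu_db <<'SQL'
INSERT INTO t_menu (menu_id, menu_name, menu_parent_id) VALUES
  (1, 'System',      NULL),
  (2, 'Users',       1),
  (3, 'Roles',       1),
  (4, 'Permissions', 3),
  (5, 'Assign',      4);
SQL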

Recursive query

MySQL 8 added WITH RECURSIVE for querying parent-child hierarchies; it does not work on earlier versions.

-- Define a CTE named temp that accumulates the matched rows
WITH RECURSIVE temp AS (
    -- Anchor: the row for the given menu ID
    SELECT * FROM t_menu WHERE menu_id=1
    UNION ALL
    -- Recursive step: rows whose parent ID matches a menu ID already in temp
    SELECT m.* FROM t_menu m, temp t WHERE m.menu_parent_id=t.menu_id
)
-- Read everything accumulated in temp
SELECT * FROM temp;
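
The same pattern run in the opposite direction walks up the tree: anchor on a child row and repeatedly match its parent. A sketch via the mysql client (with the seed data above, menu_id=5 returns the chain 5, 4, 3, 1):

mysql -uroot -p menu_db <<'SQL'
WITH RECURSIVE temp AS (
    -- Anchor: the child row we start from
    SELECT * FROM t_menu WHERE menu_id=5
    UNION ALL
    -- Recursive step: the parent of each row already in temp
    SELECT m.* FROM t_menu m, temp t WHERE m.menu_id=t.menu_parent_id
)
SELECT * FROM temp;
SQL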

zookeeper

docker run -d --name zookeeper -p 2181:2181 --restart always -e ZOO_MY_ID=1 -e ALLOW_ANONYMOUS_LOGIN=yes -e TZ=Asia/Shanghai -v /etc/localtime:/etc/localtime zookeeper:3.7
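
A quick liveness check (zkServer.sh is on the PATH in the official image):

# A single node should report Mode: standalone
docker exec zookeeper zkServer.sh status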

kafka

# Note: set KAFKA_CFG_ADVERTISED_LISTENERS to your own host's IP
docker run -d --name kafka -p 9092:9092 --restart=always  -e KAFKA_CFG_ZOOKEEPER_CONNECT=192.168.1.12:2181 -e KAFKA_BROKER_ID=1 -e KAFKA_CFG_LISTENERS=PLAINTEXT://:9092 -e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://192.168.1.12:9092 -e ALLOW_PLAINTEXT_LISTENER=yes -e KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true -e TZ=Asia/Shanghai -v /etc/localtime:/etc/localtime bitnami/kafka:2.8.0 
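
To verify the broker, create and list a topic using the CLI that ships in the bitnami image:

docker exec -it kafka /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server localhost:9092 --create --topic test
docker exec -it kafka /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server localhost:9092 --list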

redis

docker run -d --name redis -p 6379:6379 -e TZ=Asia/Shanghai -v /etc/localtime:/etc/localtime redis
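
Sanity check:

# Expect PONG
docker exec -it redis redis-cli ping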

mysql

docker run -d --name mysql -e MYSQL_ROOT_PASSWORD=123456 -p 3306:3306 -e TZ=Asia/Shanghai -v /etc/localtime:/etc/localtime --restart=always mysql:8.0.32
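
A quick connection test once initialization has finished:

# Expect the server version (8.0.32)
docker exec -it mysql mysql -uroot -p123456 -e "SELECT VERSION();"
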
  • MySQL 8 changed the default password authentication plugin
use mysql;
-- Check the password plugin; 8.0+ defaults to caching_sha2_password, which needs to be changed to mysql_native_password
select user,host,plugin from user where user='root';
ALTER USER 'root'@'localhost' IDENTIFIED BY '123456' PASSWORD EXPIRE NEVER;
ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY '123456';
ALTER USER 'root'@'%' IDENTIFIED BY '123456' PASSWORD EXPIRE NEVER;
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY '123456';
flush privileges;
  • On machines with poor disk performance you can relax durability (see the sketch below for where these settings go):
sync_binlog=0
innodb_flush_log_at_trx_commit=2
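
These settings belong in my.cnf; with the official image you can mount a drop-in config directory (a sketch; the host path is an example):

# Write the overrides to a drop-in file
mkdir -p /data/mysql/conf
cat > /data/mysql/conf/perf.cnf <<'EOF'
[mysqld]
sync_binlog=0
innodb_flush_log_at_trx_commit=2
EOF
# The official image reads extra config from /etc/mysql/conf.d
docker run -d --name mysql -e MYSQL_ROOT_PASSWORD=123456 -p 3306:3306 -e TZ=Asia/Shanghai -v /data/mysql/conf:/etc/mysql/conf.d --restart=always mysql:8.0.32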

kafka-manager

docker run -d -p 9000:9000 --restart=always --name kafka-ui -e ZK_HOSTS=192.168.3.27:2181 -e TZ=Asia/Shanghai -v /etc/localtime:/etc/localtime solsson/kafka-manager

kafka-ui-lite

docker run -d --name ui --restart=always -e TZ=Asia/Shanghai -v /etc/localtime:/etc/localtime -p 8889:8889 freakchicken/kafka-ui-lite

Add the dependency

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>

Configuration

spring:
  kafka:
    bootstrap-servers: ${KAFKA_SERVERS:localhost:9092}
    producer:
      retries: 0
      acks: 1
      batch-size: 100000
      buffer-memory: 33554432
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    consumer:
      # default consumer group name
      group-id: service-group
      # disable auto-commit; offsets are acknowledged manually in the listener
      enable-auto-commit: false
      auto-commit-interval: 1000
      auto-offset-reset: latest
      max-poll-records: 1000
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    listener:
      ack-mode: manual_immediate
      missing-topics-fatal: false
      type: batch
# custom property listing the topics to listen on
log:
  topics: topic1,topic2,topic3
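
With listener type batch and ack-mode manual_immediate, the listener receives a whole poll of records at once and must acknowledge them itself, as the Java snippet below does. To exercise it, produce a few test messages with the console producer from the Kafka container above:

docker exec -it kafka /opt/bitnami/kafka/bin/kafka-console-producer.sh --bootstrap-server localhost:9092 --topic topic1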

java

@KafkaListener(topics = "#{'${log.topics}'.split(',')}")
// Alternative method signatures spring-kafka also supports:
// public void processMessage(List<String> records, Acknowledgment ack) {
// public void processMessage(ConsumerRecord<?, ?> record, Acknowledgment ack) {
public void processMessage(List<ConsumerRecord<?, ?>> records, Acknowledgment ack) {
    try {
        System.out.println(records.size());
        System.out.println(records.get(0).value());
        // Commit the offsets manually
        ack.acknowledge();
    } catch (Exception e) {
        e.printStackTrace();
    }
}