
Overview

BookStack is an open-source alternative to Confluence; it is simple to deploy and easy to use.

Docker deployment

Deployment steps

  • Prepare a MySQL instance and create a database named bookstack.
  • Start the container with docker:
# Note APP_URL: this is the address you will use to access BookStack. If you access it via a domain name, use that domain, e.g. https://book.test.com
docker run -d --name bookstack  -e PUID=1000 -e PGID=1000 -e DB_HOST=192.168.3.27  -e DB_PORT=3306 -e DB_USER=root -e DB_PASS=123456 -e DB_DATABASE=bookstack -p 6875:80 -e APP_URL=http://192.168.3.27:6875 -e TZ=Asia/Shanghai  linuxserver/bookstack

LDAP integration

docker run -d --name bookstack  -e PUID=1000 -e PGID=1000 -e DB_HOST=192.168.3.27  -e DB_PORT=3306 -e DB_USER=root -e DB_PASS=123456 -e DB_DATABASE=bookstack -p 6875:80 -e APP_URL=http://192.168.3.27:6875 -e TZ=Asia/Shanghai -e AUTH_METHOD=ldap -e LDAP_SERVER=192.168.0.9:389 -e LDAP_BASE_DN="ou=xxxxx,dc=xxxx,dc=xxxx" -e LDAP_DN="cn=xxx,dc=xxx,dc=xxxx" -e LDAP_VERSION=3 -e LDAP_PASS="xxxxxxxxxxxxxxxxxx"  linuxserver/bookstack

You can also edit the .env file directly and mount it into the container at /config/www/.env. The .env file contains other default settings, so take care not to lose them.
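
For example, assuming the file lives at /data/bookstack/.env on the host (a hypothetical path), add this mount to the docker run command above:

-v /data/bookstack/.env:/config/www/.env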

Persistence

-v /data/bookstack/app:/app -v /data/bookstack/config:/config

The table

Suppose we have a menu table in which a menu can have sub-menus. Requirement: given a menu ID, find all of the sub-menus beneath it.

CREATE TABLE `t_menu` (
  `menu_id` int unsigned NOT NULL AUTO_INCREMENT COMMENT 'menu ID',
  `menu_name` varchar(128) NOT NULL COMMENT 'menu name',
  `menu_url` varchar(128) DEFAULT NULL COMMENT 'menu url',
  `menu_level` int DEFAULT '-1' COMMENT 'menu level',
  `menu_type` int NOT NULL DEFAULT '0' COMMENT 'menu type: 0 menu, 1 button',
  `menu_patent_id` int unsigned DEFAULT NULL COMMENT 'parent menu ID',
  `menu_icon` varchar(255) DEFAULT NULL COMMENT 'icon',
  `menu_order` int DEFAULT '0' COMMENT 'display order',
  PRIMARY KEY (`menu_id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4;
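
For illustration, a few hypothetical rows (not from the original) that the recursive query below can be run against:

INSERT INTO t_menu (menu_id, menu_name, menu_level, menu_patent_id) VALUES
    (1, 'System',    1, NULL),
    (2, 'Users',     2, 1),
    (3, 'Roles',     2, 1),
    (4, 'User list', 3, 2);
-- Starting from menu_id=1, the query below returns all four rows.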

Recursive query

MySQL 8 added WITH RECURSIVE for querying parent-child hierarchies recursively; it does not work on earlier versions.

-- Define a temporary table temp that accumulates all matched rows
WITH RECURSIVE temp AS( 
    -- Anchor: the row for the given menu ID
    SELECT * FROM t_menu WHERE menu_id=1
    UNION ALL
    -- Recursive step: rows whose parent menu ID matches a menu ID already in temp
    SELECT m.* FROM t_menu m,temp t WHERE m.menu_patent_id=t.menu_id
) 
-- Read the accumulated rows
SELECT * FROM temp;

zookeeper

docker run -d --name zookeeper -p 2181:2181 --restart always -e ZOO_MY_ID=1 -e ALLOW_ANONYMOUS_LOGIN=yes -e TZ=Asia/Shanghai -v /etc/localtime:/etc/localtime zookeeper:3.7

kafka

# Note: change KAFKA_CFG_ADVERTISED_LISTENERS to your own host's IP
docker run -d --name kafka -p 9092:9092 --restart=always  -e KAFKA_CFG_ZOOKEEPER_CONNECT=192.168.1.12:2181 -e KAFKA_BROKER_ID=1 -e KAFKA_CFG_LISTENERS=PLAINTEXT://:9092 -e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://192.168.1.12:9092 -e ALLOW_PLAINTEXT_LISTENER=yes -e KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true -e TZ=Asia/Shanghai -v /etc/localtime:/etc/localtime bitnami/kafka:2.8.0 
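
As a quick smoke test, you can create a topic from inside the container (the bin path follows the bitnami image layout; treat it as an assumption):

docker exec -it kafka /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server localhost:9092 --create --topic test --partitions 1 --replication-factor 1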

redis

docker run -d --name redis -p 6379:6379 -e TZ=Asia/Shanghai -v /etc/localtime:/etc/localtime redis

mysql

docker run -d --name mysql -e MYSQL_ROOT_PASSWORD=123456 -p 3306:3306 -e TZ=Asia/Shanghai -v /etc/localtime:/etc/localtime --restart=always mysql:8.0.32
  • MySQL 8 changed the default password authentication plugin
use mysql;
-- Check the authentication plugin: MySQL 8 defaults to caching_sha2_password; switch to mysql_native_password for older clients
select user,host,plugin from user where user='root';
ALTER USER 'root'@'localhost' IDENTIFIED BY '123456' PASSWORD EXPIRE NEVER;
ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY '123456';
ALTER USER 'root'@'%' IDENTIFIED BY '123456' PASSWORD EXPIRE NEVER;
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY '123456';
flush privileges;
  • If the machine's disk performance is poor, you can relax durability with the settings below (a my.cnf sketch follows them)
sync_binlog=0
innodb_flush_log_at_trx_commit=2
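
Both settings go in the [mysqld] section. A minimal sketch of a drop-in config file for the official image (the host path is an assumption; /etc/mysql/conf.d is the image's standard drop-in directory):

# mounted into the container with: -v /data/mysql/conf:/etc/mysql/conf.d
[mysqld]
# let the OS decide when to sync the binlog to disk instead of fsyncing every commit
sync_binlog=0
# write the redo log at commit, but fsync it only about once per second
innodb_flush_log_at_trx_commit=2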

kafka-manager

docker run -d -p 9000:9000 --restart=always --name kafka-ui -e ZK_HOSTS=192.168.3.27:2181 -e TZ=Asia/Shanghai -v /etc/localtime:/etc/localtime solsson/kafka-manager

kafka-ui-lite

docker run -d --name ui --restart=always -e TZ=Asia/Shanghai -v /etc/localtime:/etc/localtime -p 8889:8889 freakchicken/kafka-ui-lite

Adding the dependency

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>

Configuration

spring:
  kafka:
    bootstrap-servers: ${KAFKA_SERVERS:localhost:9092}
    producer:
      retries: 0
      acks: 1
      batch-size: 100000
      buffer-memory: 33554432
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    consumer:
      # default consumer group name
      group-id: service-group
      # disable auto-commit; offsets are committed manually in the listener
      enable-auto-commit: false
      auto-commit-interval: 1000
      auto-offset-reset: latest
      max-poll-records: 1000
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    listener:
      ack-mode: manual_immediate
      missing-topics-fatal: false
      type: batch
# custom property listing the topics to listen on
log:
  topics: topic1,topic2,topic3

java

@KafkaListener(topics ="#{'${log.topics}'.split(',')}")
// other signatures spring-kafka supports (the single-record form requires a non-batch listener):
// public void processMessage(List<String> records, Acknowledgment ack) {
// public void processMessage(ConsumerRecord<?, ?> records, Acknowledgment ack) {
public void processMessage(List<ConsumerRecord<?, ?>> records, Acknowledgment ack) {
    try {
        System.out.println(records.size());
        System.out.println(records.get(0).value());
        // manual commit, matching listener.ack-mode: manual_immediate
        ack.acknowledge();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
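
The YAML above also configures a producer. A minimal sketch of sending to one of the listened topics via KafkaTemplate (the class name and hard-coded topic are assumptions, not from the original):

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

@Component
public class LogProducer {

    // uses the producer settings from spring.kafka.producer above
    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    public void send(String message) {
        // "topic1" is one of the topics listed under log.topics
        kafkaTemplate.send("topic1", message);
    }
}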

Overview

This post installs the Loki Docker driver plugin so that container logs are collected directly and pushed to Loki. See the official documentation.

Plugin installation

# Install the latest plugin
docker plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions
# List installed plugins
[root@data1 ~]# docker plugin ls
ID             NAME          DESCRIPTION           ENABLED
744b79d5d1a9   loki:latest   Loki Logging Driver   true

Plugin upgrade

# Disable the plugin
docker plugin disable loki --force
# Upgrade
docker plugin upgrade loki grafana/loki-docker-driver:latest --grant-all-permissions
# Re-enable
docker plugin enable loki
# Restart docker
systemctl restart docker

Plugin removal

docker plugin disable loki --force
docker plugin rm loki

Usage

Set the log driver for a single container

# --log-driver=loki
docker run --log-driver=loki \
    --log-opt loki-url="http://IP:3100/loki/api/v1/push" \
    --log-opt loki-retries=5 \
    --log-opt loki-batch-size=400 \
    --log-opt max-size=50m \
    --log-opt max-file=3 \
    grafana/grafana

Set default log options for all containers

Edit /etc/docker/daemon.json (create it if it does not exist).

{
    "debug" : true,
    "log-driver": "loki",
    "log-opts": {
        "loki-url": "http://IP:3100/loki/api/v1/push",
        "loki-batch-size": "400",
        "loki-retries": 5,
        "max-size": "50m",
        "max-file": "10"
    }
}

After editing daemon.json, restart Docker for the defaults to take effect. For more usage, such as docker-compose, see the official docs.
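
For reference, a minimal docker-compose sketch using the same options (the service and image are placeholders; verify against the official docs):

services:
  grafana:
    image: grafana/grafana
    logging:
      driver: loki
      options:
        loki-url: "http://IP:3100/loki/api/v1/push"
        loki-retries: "5"
        loki-batch-size: "400"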

  • loki.yaml
auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9095

ingester:
  chunk_idle_period: 3m
  chunk_block_size: 262144
  chunk_retain_period: 1m
  max_transfer_retries: 0
  lifecycler:
    ring:
      kvstore:
        store: inmemory
      replication_factor: 1

limits_config:
  reject_old_samples: true
  reject_old_samples_max_age: 168h

common:
  path_prefix: /tmp/loki
  storage:
    filesystem:
      chunks_directory: /tmp/loki/chunks
      rules_directory: /tmp/loki/rules
  replication_factor: 1
  ring:
    instance_addr: 127.0.0.1
    kvstore:
      store: inmemory

storage_config:
  boltdb_shipper:
    active_index_directory: /tmp/loki/boltdb-shipper-active
    cache_location: /tmp/loki/boltdb-shipper-cache
    cache_ttl: 24h
    shared_store: filesystem
  filesystem:
    directory: /tmp/loki/chunks

chunk_store_config:
  max_look_back_period: 672h

table_manager:
  retention_deletes_enabled: true
  retention_period: 672h

compactor:
  working_directory: /tmp/loki/boltdb-shipper-compactor
  shared_store: filesystem
  retention_enabled: true
  retention_delete_delay: 2h

query_range:
  results_cache:
    cache:
      embedded_cache:
        enabled: true
        max_size_mb: 200

querier:
  query_timeout: 2m

schema_config:
  configs:
    - from: 2020-10-24
      store: boltdb-shipper
      object_store: filesystem
      schema: v11
      index:
        prefix: index_
        period: 24h

# ruler:
#   alertmanager_url: http://localhost:9093

# By default, Loki will send anonymous, but uniquely-identifiable usage and configuration
# analytics to Grafana Labs. These statistics are sent to https://stats.grafana.org/
#
# Statistics help us better understand how Loki is used, and they show us performance
# levels for most users. This helps us prioritize features and documentation.
# For more information on what's sent, look at
# https://github.com/grafana/loki/blob/main/pkg/usagestats/stats.go
# Refer to the buildReport method to see what goes into a report.
#
# Reporting is disabled here:
analytics:
  reporting_enabled: false
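
A minimal sketch of running Loki with this file (the host path and image tag are assumptions):

docker run -d --name loki -p 3100:3100 \
    -v /data/loki/loki.yaml:/etc/loki/loki.yaml \
    grafana/loki:2.6.1 -config.file=/etc/loki/loki.yaml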

Overview

This post implements API rate limiting with a custom annotation plus Redis.

Implementation

  • Custom annotation (a usage sketch follows at the end of this section)
@Inherited
@Documented
@Target({ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
// by default: 2 requests allowed per 60 seconds
public @interface RequestLimit {
    /**
     * Window length in seconds
     */
    int second() default 60;
    /**
     * Number of requests allowed within {@code second} seconds
     */
    int maxCount() default 2;
}
  • Rate limiting via AOP
@Configuration
@EnableAspectJAutoProxy
@Aspect
@Slf4j
@Order(-5) // if multiple aspects apply, give this one the lowest order so the limit check runs first
public class RequestLimitAspect {
    // request counts are kept in redis to decide whether to throttle
    @Autowired
    private RedisTemplate<String, String> redisTemplate;
    // pointcut: every method annotated with RequestLimit
    @Pointcut(value = "@annotation(org.xxx.xxx.annotation.RequestLimit)")
    private void handleRequestLimit() {
    }

    @Around("handleRequestLimit()")
    public Object doRequestLimit(ProceedingJoinPoint pjp) throws Throwable {
        MethodSignature signature = (MethodSignature) pjp.getSignature();
        Method method = signature.getMethod();
        RequestLimit requestLimit = method.getAnnotation(RequestLimit.class);
        // no annotation: let the call through
        if (null == requestLimit) {
            return pjp.proceed();
        }
        ServletRequestAttributes requestAttributes = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
        // not called in a web request context: nothing to limit
        if (null == requestAttributes) {
            return pjp.proceed();
        }
        // get the HttpServletRequest
        HttpServletRequest request = (HttpServletRequest) requestAttributes.resolveReference(RequestAttributes.REFERENCE_REQUEST);
        // resolve the client's public IP
        String ip = CommonUtils.getPublicIp(request);
        // throttle per public IP and per request path
        String key = "limit:" + ip + ":" + request.getServletPath();
        String count = redisTemplate.opsForValue().get(key);
        // first request in the window: create the counter with an expiry
        // (note: this read-then-write is not atomic; a Lua script would close the race)
        if (StringUtils.isEmpty(count)) {
            redisTemplate.opsForValue().set(key, "1", requestLimit.second(), TimeUnit.SECONDS);
            return pjp.proceed();
        }
        // still under the limit: count this request and let it through
        if (Integer.parseInt(count) < requestLimit.maxCount()) {
            redisTemplate.opsForValue().increment(key);
            return pjp.proceed();
        }
        log.info("RemoteAddr:{} 请求接口:{} 请求过于频繁, 已拒绝请求.", ip, request.getServletPath());
        HttpServletResponse response = requestAttributes.getResponse();
        response.setCharacterEncoding("UTF-8");
        response.setContentType("application/json; charset=utf-8");
        response.getWriter()
                // 触发限流自定义返回内容
                .println(JSON.toJSONString(new Response(Constant.ResponseCode.RequestLimit, Constant.ResponseMsg.RequestLimit)));
        return null;

    }
}
  • Resolving the client's public IP
public static String getPublicIp(HttpServletRequest request) {
    String ip = null;
    try {
        ip = request.getHeader("x-forwarded-for");
        if (StringUtils.isEmpty(ip) || "unknown".equalsIgnoreCase(ip)) {
            ip = request.getHeader("Proxy-Client-IP");
        }
        if (StringUtils.isEmpty(ip) || "unknown".equalsIgnoreCase(ip)) {
            ip = request.getHeader("WL-Proxy-Client-IP");
        }
        if (StringUtils.isEmpty(ip) || "unknown".equalsIgnoreCase(ip)) {
            ip = request.getHeader("HTTP_CLIENT_IP");
        }
        if (StringUtils.isEmpty(ip) || "unknown".equalsIgnoreCase(ip)) {
            ip = request.getHeader("HTTP_X_FORWARDED_FOR");
        }
        if (StringUtils.isEmpty(ip) || "unknown".equalsIgnoreCase(ip)) {
            ip = request.getRemoteAddr();
        }
    } catch (Exception e) {
        log.error("getPublicIp ERROR ", e);
    }
    // if the request came through proxies, x-forwarded-for is a list; keep the first (client) address
    if (!StringUtils.isEmpty(ip) && ip.length() > 15) {
        if (ip.indexOf(",") > 0) {
            ip = ip.substring(0, ip.indexOf(","));
        }
    }
    return ip;
}
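
Putting it together, a minimal sketch of guarding an endpoint with the annotation (the controller, path, and limits are assumptions, not from the original):

import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class DemoController {

    // each public IP may call this endpoint at most 5 times per 10 seconds
    @RequestLimit(second = 10, maxCount = 5)
    @GetMapping("/api/demo")
    public String demo() {
        return "ok";
    }
}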