
Startup parameters

  • --requirepass 123456: password clients must supply when connecting to Redis
  • --masterauth 123456: password a replica uses to authenticate with its master (see the example below)
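
For example, a password-protected master and replica could be started roughly like this (a sketch; the IP, ports, and the password 123456 are placeholders consistent with the commands below):

# Master: clients must authenticate with 123456
docker run -d -p 6379:6379 --name redis-server redis:6.2.5 redis-server --requirepass 123456
# Replica: authenticates against the master, and optionally protects itself with the same password
docker run -d -p 6378:6379 --name redis-slave redis:6.2.5 redis-server --slaveof 192.168.41.128 6379 --masterauth 123456 --requirepass 123456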

One master, one replica

# Start the master
docker run -d -p 6379:6379 --name redis-server redis:6.2.5 redis-server

# Start a replica (multiple replicas can be started)
docker run -d -p 6378:6379 --name redis-slave redis:6.2.5 redis-server --slaveof 192.168.41.128 6379

# Check the replication status
docker exec -it redis-server bash
root@2057712d5b24:/data# redis-cli 
127.0.0.1:6379> info replication
# Replication
role:master
connected_slaves:1
slave0:ip=172.17.0.1,port=6379,state=online,offset=844,lag=0
master_failover_state:no-failover
master_replid:a7c3448b15070b13848c48139b99016a67cb5b9e
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:844
second_repl_offset:-1
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:1
repl_backlog_histlen:844


# Test
# SET a key on the master; it can be read with GET from the replica
# The replica is read-only and rejects writes

Important cluster parameters

  • --cluster-enabled: whether to enable cluster mode; values: yes, no
  • --cluster-config-file <file>.conf: node configuration file, generated automatically
  • --cluster-node-timeout <milliseconds>: node connection timeout
  • --appendonly: whether to enable AOF persistence; values: yes, no

Simplest setup: 3 masters

For easier testing, the containers are started with Docker's host network mode, and the data directory is not mapped out of the containers for persistence.

To persist data, add a volume mapping such as -v /data/redis-data/node1:/data.
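
For example, node 1 started with its data directory mapped out would look roughly like this (a sketch combining the volume mapping above with the command below; --appendonly yes is optional):

docker run -d --name redis-node1 --net host -v /data/redis-data/node1:/data redis:6.2.5 --cluster-enabled yes --cluster-config-file node-1.conf --port 6379 --appendonly yes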

# Node 1
docker run -d --name redis-node1 --net host redis:6.2.5 --cluster-enabled yes --cluster-config-file node-1.conf --port 6379
# Node 2
docker run -d --name redis-node2 --net host redis:6.2.5 --cluster-enabled yes --cluster-config-file node-2.conf --port 6380
# Node 3
docker run -d --name redis-node3 --net host redis:6.2.5 --cluster-enabled yes --cluster-config-file node-3.conf --port 6381

# Enter one of the containers to run the cluster-create command
docker exec -it redis-node1 /bin/bash
# Create the cluster; since there are only three nodes, the replica count is set to 0
redis-cli --cluster create 192.168.41.128:6379  192.168.41.128:6380  192.168.41.128:6381 --cluster-replicas 0
>>> Performing hash slots allocation on 3 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
M: 699e2ff7c0098cc1cee1ddae640b4bbb66747c72 192.168.41.128:6379
   slots:[0-5460] (5461 slots) master
M: ee2f07166809df6a56a57454470d65944ab8051f 192.168.41.128:6380
   slots:[5461-10922] (5462 slots) master
M: 043f76013aec2600396c4e92c50d0861f01a0dde 192.168.41.128:6381
   slots:[10923-16383] (5461 slots) master
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
.
>>> Performing Cluster Check (using node 192.168.41.128:6379)
M: 699e2ff7c0098cc1cee1ddae640b4bbb66747c72 192.168.41.128:6379
   slots:[0-5460] (5461 slots) master
M: ee2f07166809df6a56a57454470d65944ab8051f 192.168.41.128:6380
   slots:[5461-10922] (5462 slots) master
M: 043f76013aec2600396c4e92c50d0861f01a0dde 192.168.41.128:6381
   slots:[10923-16383] (5461 slots) master
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

# Check the cluster status
root@node1:/data# redis-cli 
127.0.0.1:6379> cluster nodes
ee2f07166809df6a56a57454470d65944ab8051f 192.168.41.128:6380@16380 master - 0 1629358092049 2 connected 5461-10922
043f76013aec2600396c4e92c50d0861f01a0dde 192.168.41.128:6381@16381 master - 0 1629358091025 3 connected 10923-16383
699e2ff7c0098cc1cee1ddae640b4bbb66747c72 192.168.41.128:6379@16379 myself,master - 0 1629358091000 1 connected 0-5460

# Test: connect in cluster mode, SET a key, and it can be read with GET from other nodes
root@node1:/data# redis-cli -c
127.0.0.1:6379> set hello world
OK

3 masters + 3 replicas

# Start 6 Redis nodes (masters and replicas)
docker run -d --name redis-node1 --net host redis:6.2.5 --cluster-enabled yes --cluster-config-file node-1.conf --port 6379
docker run -d --name redis-node2 --net host redis:6.2.5 --cluster-enabled yes --cluster-config-file node-2.conf --port 6380
docker run -d --name redis-node3 --net host redis:6.2.5 --cluster-enabled yes --cluster-config-file node-3.conf --port 6381
docker run -d --name redis-node4 --net host redis:6.2.5 --cluster-enabled yes --cluster-config-file node-4.conf --port 6382
docker run -d --name redis-node5 --net host redis:6.2.5 --cluster-enabled yes --cluster-config-file node-5.conf --port 6383
docker run -d --name redis-node6 --net host redis:6.2.5 --cluster-enabled yes --cluster-config-file node-6.conf --port 6384

# Enter any one of the nodes
docker exec -it redis-node1 /bin/bash

# Create the cluster
redis-cli --cluster create 192.168.41.128:6379 192.168.41.128:6380 192.168.41.128:6381 192.168.41.128:6382 192.168.41.128:6383 192.168.41.128:6384 --cluster-replicas 1

# Check the cluster status
root@node1:/data# redis-cli 
127.0.0.1:6379> cluster nodes
95b197193ee57fb0f331ee03df671b0e50d293c2 192.168.41.128:6380@16380 master - 0 1629359633234 2 connected 5461-10922
10678b7ed61dbcd923472a272b6d365222fdad95 192.168.41.128:6384@16384 slave 345dd13e0bbf74a09ebe0011302f0ba8a011c726 0 1629359633000 3 connected
34e4d85bbdebc619ed4e9a183a0939db0ad82d9a 192.168.41.128:6382@16382 slave 75325c54ff09715263c514bde71d6582fde2b8a8 0 1629359632000 1 connected
345dd13e0bbf74a09ebe0011302f0ba8a011c726 192.168.41.128:6381@16381 master - 0 1629359635272 3 connected 10923-16383
75325c54ff09715263c514bde71d6582fde2b8a8 192.168.41.128:6379@16379 myself,master - 0 1629359634000 1 connected 0-5460
98b18508d3248973c3d28930b7a571d4564c8e2a 192.168.41.128:6383@16383 slave 95b197193ee57fb0f331ee03df671b0e50d293c2 0 1629359635000 2 connected

Symptom

The master node shows NotReady and is unschedulable; coredns and the various DaemonSet pods that normally run on the master all show as abnormal. Checking the kubelet service reveals that it fails to start.

The detailed kubelet startup errors, viewed with journalctl -u kubelet -f, show that the main cause is a missing file: /etc/kubernetes/bootstrap-kubelet.conf.

Fix

  1. Copy /etc/kubernetes/bootstrap-kubelet.conf from another node to the master.
  2. kubelet can now start, but it still reports errors:
Aug 11 11:18:06 master kubelet[1757]: E0811 11:18:06.553681    1757 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.RuntimeClass: runtimeclasses.node.k8s.io is forbidden: User "system:anonymous" cannot list resource "runtimeclasses" in API group "node.k8s.io" at the cluster scope
Aug 11 11:18:06 master kubelet[1757]: E0811 11:18:06.631490    1757 kubelet.go:2248] node "master" not found
Aug 11 11:18:06 master kubelet[1757]: E0811 11:18:06.731614    1757 kubelet.go:2248] node "master" not found
Aug 11 11:18:06 master kubelet[1757]: E0811 11:18:06.753619    1757 reflector.go:125] k8s.io/kubernetes/pkg/kubelet/kubelet.go:444: Failed to list *v1.Service: services is forbidden: User "system:anonymous" cannot list resource "services" in API group "" at the cluster scope
Aug 11 11:18:06 master kubelet[1757]: E0811 11:18:06.831748    1757 kubelet.go:2248] node "master" not found
Aug 11 11:18:06 master kubelet[1757]: E0811 11:18:06.931869    1757 kubelet.go:2248] node "master" not found
Aug 11 11:18:06 master kubelet[1757]: E0811 11:18:06.954503    1757 reflector.go:125] k8s.io/kubernetes/pkg/kubelet/config/apiserver.go:47: Failed to list *v1.Pod: pods is forbidden: User "system:anonymous" cannot list resource "pods" in API group "" at the cluster scope
  3. Grant the anonymous user the required permissions:
kubectl create clusterrolebinding test:anonymous --clusterrole=cluster-admin --user=system:anonymous

When building an index on a varchar column, there is no need to index the whole column; choose the index prefix length based on the actual selectivity of the text.

Selectivity

-- prefix length 20
mysql> select count(distinct left(`column_name`,20))/count(*) from table_name;
+-------------------------------------------------+
| count(distinct left(`column_name`,20))/count(*) |
+-------------------------------------------------+
|                                          0.8288 | -- selectivity
+-------------------------------------------------+
1 row in set (0.01 sec)

The longer the prefix, the higher the selectivity. Pick the most appropriate length; a selectivity of around 90% is usually enough.
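
To pick the length, several candidate prefixes can be compared in a single query (a sketch; column_name and table_name are placeholders as above):

mysql> select count(distinct left(`column_name`,8))/count(*)  as sel_8,
    ->        count(distinct left(`column_name`,14))/count(*) as sel_14,
    ->        count(distinct left(`column_name`,20))/count(*) as sel_20
    -> from table_name;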

Create the index

alter table table_name add index index_column(`column_name`(20));

Dependencies

<!-- gRPC dependencies -->
<dependency>
    <groupId>com.google.protobuf</groupId>
    <artifactId>protobuf-java</artifactId>
    <version>3.6.1</version>
</dependency>
<dependency>
    <groupId>io.grpc</groupId>
    <artifactId>grpc-protobuf</artifactId>
    <version>1.23.0</version>
</dependency>
<dependency>
    <groupId>io.grpc</groupId>
    <artifactId>grpc-stub</artifactId>
    <version>1.23.0</version>
</dependency>
<dependency>
    <groupId>io.grpc</groupId>
    <artifactId>grpc-netty</artifactId>
    <version>1.23.0</version>
</dependency>
<dependency>
    <groupId>io.grpc</groupId>
    <artifactId>grpc-netty-shaded</artifactId>
    <version>1.23.0</version>
</dependency>
<dependency>
    <groupId>io.grpc</groupId>
    <artifactId>grpc-okhttp</artifactId>
    <version>1.23.0</version>
</dependency>

Plugins

<build>
    <extensions>
        <extension>
            <groupId>kr.motd.maven</groupId>
            <artifactId>os-maven-plugin</artifactId>
            <version>1.5.0.Final</version>
        </extension>
    </extensions>
    <plugins>
        <plugin>
            <groupId>org.xolstice.maven.plugins</groupId>
            <artifactId>protobuf-maven-plugin</artifactId>
            <version>0.5.1</version>
            <configuration>
                <protocArtifact>com.google.protobuf:protoc:3.5.1-1:exe:${os.detected.classifier}</protocArtifact>
                <pluginId>grpc-java</pluginId>
                <pluginArtifact>io.grpc:protoc-gen-grpc-java:1.14.0:exe:${os.detected.classifier}</pluginArtifact>
            </configuration>
            <executions>
                <execution>
                    <goals>
                        <goal>compile</goal>
                        <goal>compile-custom</goal>
                    </goals>
                </execution>
            </executions>
        </plugin>
    </plugins>
</build>

Define the proto file

Under src/main (at the same level as the java directory), create a proto directory.
Inside the proto directory, create the file hello.proto:

syntax = "proto3";

option java_multiple_files = true;
option java_package = "com.example.java.grpc";
option java_outer_classname = "HelloWorldProto";
option objc_class_prefix = "GRPC";

package common;

service HelloService {
    rpc SayHello (Hello) returns (Response) {
    }
}

message Hello {
    string name = 1;
}

message Response {
    string content = 1;
}

Generate the Java classes

mvn protobuf:compile
mvn protobuf:compile-custom

Write the server

server:
  grpc:
    port: 9090

@Slf4j
@Service
public class GrpcServer {
    // port number defined in the configuration file above (server.grpc.port)
    @Value("${server.grpc.port}")
    Integer port;
    private Server server;

    @PostConstruct
    protected void start() throws IOException {
        server = ServerBuilder.forPort(port)
                .addService(new HelloServiceImpl())
                .build()
                .start();
        log.info("Grpc Server started, listening on " + port);
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            System.err.println("shutting down gRPC java server ...");
            GrpcServer.this.stop();
        }));
    }

    private void stop() {
        if (server != null) {
            server.shutdown();
        }
    }

    protected void blockUntilShutdown() throws InterruptedException {
        if (server != null) {
            server.awaitTermination();
        }
    }

    private class HelloServiceImpl extends HelloServiceGrpc.HelloServiceImplBase {

        @Override
        public void sayHello(Hello req, StreamObserver<Response> responseObserver) {
            Response reply = Response.newBuilder().setContent(("Response Message from Java gRPC-Server: Hello " + req.getName())).build();
            responseObserver.onNext(reply);
            responseObserver.onCompleted();
        }
    }

}

Write the client

@Slf4j
public class GrpcClient {
    private ManagedChannel channel;
    private HelloServiceGrpc.HelloServiceBlockingStub blockingStub;

    public GrpcClient(String host, int port) {
        channel = ManagedChannelBuilder.forAddress(host, port)
                .usePlaintext()
                .build();
        blockingStub = HelloServiceGrpc.newBlockingStub(channel);
    }


    public void shutdown() throws InterruptedException {
        channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
    }

    public String sayHello(String name) {
        Hello request = Hello.newBuilder().setName(name).build();
        Response response;
        try {
            response = blockingStub.sayHello(request);
            return response.getContent();
        } catch (StatusRuntimeException e) {
            log.warn("RPC failed: {}", e.getStatus());
            return "RPC failed";
        }
    }
}
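
A minimal way to exercise the client against the server above (a sketch; GrpcClientDemo is a hypothetical class, and the host 127.0.0.1 / port 9090 are assumptions matching the server.grpc.port config):

public class GrpcClientDemo {
    public static void main(String[] args) throws InterruptedException {
        // Connect to the gRPC server started above
        GrpcClient client = new GrpcClient("127.0.0.1", 9090);
        try {
            // Expected output: "Response Message from Java gRPC-Server: Hello world"
            System.out.println(client.sayHello("world"));
        } finally {
            // Release the channel
            client.shutdown();
        }
    }
}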

Installation

# 1. Check that the disks meet the requirements
lsblk -f

# 2. Clone the Rook repository
git clone --single-branch --branch master https://github.com/rook/rook.git

# 3. Enter the Rook Ceph examples directory
cd rook/cluster/examples/kubernetes/ceph

# 4. Install the CRDs and the operator
kubectl create -f crds.yaml -f common.yaml -f operator.yaml

# 5. Check that the pods are running
kubectl -n rook-ceph get pod

# 6. Install the Ceph cluster
kubectl create -f cluster.yaml

# 7. Check that the pods are running
kubectl -n rook-ceph get pod

Install the Ceph toolbox

Save the following as toolbox.yaml:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-tools
  namespace: rook-ceph
  labels:
    app: rook-ceph-tools
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rook-ceph-tools
  template:
    metadata:
      labels:
        app: rook-ceph-tools
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: rook-ceph-tools
        image: rook/ceph:master
        command: ["/tini"]
        args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
        imagePullPolicy: IfNotPresent
        env:
          - name: ROOK_CEPH_USERNAME
            valueFrom:
              secretKeyRef:
                name: rook-ceph-mon
                key: ceph-username
          - name: ROOK_CEPH_SECRET
            valueFrom:
              secretKeyRef:
                name: rook-ceph-mon
                key: ceph-secret
        volumeMounts:
          - mountPath: /etc/ceph
            name: ceph-config
          - name: mon-endpoint-volume
            mountPath: /etc/rook
      volumes:
        - name: mon-endpoint-volume
          configMap:
            name: rook-ceph-mon-endpoints
            items:
            - key: data
              path: mon-endpoints
        - name: ceph-config
          emptyDir: {}
      tolerations:
        - key: "node.kubernetes.io/unreachable"
          operator: "Exists"
          effect: "NoExecute"
          tolerationSeconds: 5

# 1. Install the toolbox
kubectl create -f toolbox.yaml

# 2. Enter the toolbox container
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- bash

# 3. Run Ceph commands
ceph status
ceph osd status
ceph df
rados df

Block storage (used by a single pod)

kubectl create -f cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml
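
A PVC bound to this storage class might look like the following (a sketch; the class name rook-ceph-block follows the name used in the example storageclass.yaml, and ReadWriteOnce reflects use by a single pod):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc
spec:
  storageClassName: rook-ceph-block
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi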

Shared filesystem storage (shared by multiple pods)

  • filesystem.yaml
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: myfs
  namespace: rook-ceph
spec:
  metadataPool:
    replicated:
      size: 3
  dataPools:
    - replicated:
        size: 3
  preserveFilesystemOnDelete: true
  metadataServer:
    activeCount: 1
    activeStandby: true

# 1. Create the filesystem
kubectl create -f filesystem.yaml

# 2. Create the storage class
kubectl create -f cluster/examples/kubernetes/ceph/csi/cephfs/storageclass.yaml
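
A PVC against the CephFS storage class can then be mounted by several pods at once (a sketch; the class name rook-cephfs follows the name used in the example storageclass.yaml, with ReadWriteMany for shared access):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-pvc
spec:
  storageClassName: rook-cephfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi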

Configure automatic expansion

After a new device is added, it can be discovered automatically.

# Enable automatic disk discovery
# vim operator.yaml
ROOK_ENABLE_DISCOVERY_DAEMON: "true"