
When creating an index on a varchar column, there is usually no need to index the full value; choose a prefix length based on the actual selectivity of the data.

Selectivity

-- prefix length 20
mysql> select count(distinct left(`column_name`,20))/count(*) from table_name;

+-------------------------------------------------+

| count(distinct left(`column_name`,20))/count(*) |

+-------------------------------------------------+

|                                          0.8288 | -- selectivity

+-------------------------------------------------+

1 row in set (0.01 sec)

The longer the prefix, the higher the selectivity. Pick the shortest length that still gives good selectivity; around 90% (0.90) is usually sufficient.
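
To avoid re-running the query for every candidate length, several prefix lengths can be compared in a single pass. A minimal sketch (table and column names are the same placeholders as above):

-- Compare the selectivity of several candidate prefix lengths in one scan
select
  count(distinct left(`column_name`, 10)) / count(*) as sel_10,
  count(distinct left(`column_name`, 15)) / count(*) as sel_15,
  count(distinct left(`column_name`, 20)) / count(*) as sel_20,
  count(distinct left(`column_name`, 25)) / count(*) as sel_25
from table_name;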

Creating the index

alter table table_name add index index_column(`column_name`(20));
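
To confirm the prefix length that was actually applied, check the Sub_part column of SHOW INDEX (a quick sanity check; table and index names are the placeholders used above):

-- Sub_part shows the indexed prefix length (20 here); NULL would mean the full column is indexed
show index from table_name where Key_name = 'index_column';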

Dependencies

<!-- gRPC dependencies -->
<dependency>
    <groupId>com.google.protobuf</groupId>
    <artifactId>protobuf-java</artifactId>
    <version>3.6.1</version>
</dependency>
<dependency>
    <groupId>io.grpc</groupId>
    <artifactId>grpc-protobuf</artifactId>
    <version>1.23.0</version>
</dependency>
<dependency>
    <groupId>io.grpc</groupId>
    <artifactId>grpc-stub</artifactId>
    <version>1.23.0</version>
</dependency>
<dependency>
    <groupId>io.grpc</groupId>
    <artifactId>grpc-netty</artifactId>
    <version>1.23.0</version>
</dependency>
<dependency>
    <groupId>io.grpc</groupId>
    <artifactId>grpc-netty-shaded</artifactId>
    <version>1.23.0</version>
</dependency>
<dependency>
    <groupId>io.grpc</groupId>
    <artifactId>grpc-okhttp</artifactId>
    <version>1.23.0</version>
</dependency>

Plugin

<build>
    <extensions>
        <extension>
            <groupId>kr.motd.maven</groupId>
            <artifactId>os-maven-plugin</artifactId>
            <version>1.5.0.Final</version>
        </extension>
    </extensions>
    <plugins>
        <plugin>
            <groupId>org.xolstice.maven.plugins</groupId>
            <artifactId>protobuf-maven-plugin</artifactId>
            <version>0.5.1</version>
            <configuration>
                <protocArtifact>com.google.protobuf:protoc:3.5.1-1:exe:${os.detected.classifier}</protocArtifact>
                <pluginId>grpc-java</pluginId>
                <pluginArtifact>io.grpc:protoc-gen-grpc-java:1.14.0:exe:${os.detected.classifier}</pluginArtifact>
            </configuration>
            <executions>
                <execution>
                    <goals>
                        <goal>compile</goal>
                        <goal>compile-custom</goal>
                    </goals>
                </execution>
            </executions>
        </plugin>
    </plugins>
</build>

Designing the proto file

Create a proto directory under src/main (at the same level as the java directory).
Inside the proto directory, create a file named hello.proto.

syntax = "proto3";

option java_multiple_files = true;
option java_package = "com.example.java.grpc";
option java_outer_classname = "HelloWorldProto";
option objc_class_prefix = "GRPC";

package common;

service HelloService {
    rpc SayHello (Hello) returns (Response) {
    }
}

message Hello {
    string name = 1;
}

message Response {
    string content = 1;
}

Generating the Java files

mvn protobuf:compile
mvn protobuf:compile-custom

Writing the server

Define the gRPC port in the Spring configuration file, then start the server from a Spring-managed bean:

server:
  grpc:
    port: 9090

@Slf4j
@Service
public class GrpcServer {
    // Port number defined in the configuration file above
    @Value("${server.grpc.port}")
    Integer port;
    private Server server;

    @PostConstruct
    protected void start() throws IOException {
        server = ServerBuilder.forPort(port)
                .addService(new HelloServiceImpl())
                .build()
                .start();
        log.info("Grpc Server started, listening on " + port);
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            System.err.println("shutting down gRPC java server ...");
            GrpcServer.this.stop();
        }));
    }

    private void stop() {
        if (server != null) {
            server.shutdown();
        }
    }

    protected void blockUntilShutdown() throws InterruptedException {
        if (server != null) {
            server.awaitTermination();
        }
    }

    private class HelloServiceImpl extends HelloServiceGrpc.HelloServiceImplBase {

        @Override
        public void sayHello(Hello req, StreamObserver<Response> responseObserver) {
            Response reply = Response.newBuilder().setContent(("Response Message from Java gRPC-Server: Hello " + req.getName())).build();
            responseObserver.onNext(reply);
            responseObserver.onCompleted();
        }
    }

}

Writing the client

@Slf4j
public class GrpcClient {
    private ManagedChannel channel;
    private HelloServiceGrpc.HelloServiceBlockingStub blockingStub;

    public GrpcClient(String host, int port) {
        channel = ManagedChannelBuilder.forAddress(host, port)
                .usePlaintext()
                .build();
        blockingStub = HelloServiceGrpc.newBlockingStub(channel);
    }


    public void shutdown() throws InterruptedException {
        channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
    }

    public String sayHello(String name) {
        Hello request = Hello.newBuilder().setName(name).build();
        Response response;
        try {
            response = blockingStub.sayHello(request);
            return response.getContent();
        } catch (StatusRuntimeException e) {
            log.warn("RPC failed: {}", e.getStatus());
            return "RPC failed";
        }
    }
}
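
A minimal way to exercise the client against the server above (host, port, and the greeting name are placeholders for your environment):

public class GrpcClientDemo {
    public static void main(String[] args) throws InterruptedException {
        // Assumes the server above is already listening on localhost:9090
        GrpcClient client = new GrpcClient("localhost", 9090);
        try {
            System.out.println(client.sayHello("world"));
        } finally {
            client.shutdown();
        }
    }
}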

Installation

# 1. Check that the disks meet the requirements
lsblk -f

# 2. Clone the Rook repository
git clone --single-branch --branch master https://github.com/rook/rook.git

# 3. Enter the Rook Ceph examples directory
cd rook/cluster/examples/kubernetes/ceph

# 4. Install the CRDs, common resources, and the operator
kubectl create -f crds.yaml -f common.yaml -f operator.yaml

# 5. Check that the pods are running
kubectl -n rook-ceph get pod

# 6. Install the Ceph cluster
kubectl create -f cluster.yaml

# 7. Check that the pods are running
kubectl -n rook-ceph get pod

Installing the Ceph toolbox

Save the following as toolbox.yaml.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-tools
  namespace: rook-ceph
  labels:
    app: rook-ceph-tools
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rook-ceph-tools
  template:
    metadata:
      labels:
        app: rook-ceph-tools
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: rook-ceph-tools
        image: rook/ceph:master
        command: ["/tini"]
        args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
        imagePullPolicy: IfNotPresent
        env:
          - name: ROOK_CEPH_USERNAME
            valueFrom:
              secretKeyRef:
                name: rook-ceph-mon
                key: ceph-username
          - name: ROOK_CEPH_SECRET
            valueFrom:
              secretKeyRef:
                name: rook-ceph-mon
                key: ceph-secret
        volumeMounts:
          - mountPath: /etc/ceph
            name: ceph-config
          - name: mon-endpoint-volume
            mountPath: /etc/rook
      volumes:
        - name: mon-endpoint-volume
          configMap:
            name: rook-ceph-mon-endpoints
            items:
            - key: data
              path: mon-endpoints
        - name: ceph-config
          emptyDir: {}
      tolerations:
        - key: "node.kubernetes.io/unreachable"
          operator: "Exists"
          effect: "NoExecute"
          tolerationSeconds: 5

# 1. Install the toolbox
kubectl create -f toolbox.yaml

# 2. Exec into the toolbox container
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- bash

# 3. Run Ceph status commands
ceph status
ceph osd status
ceph df
rados df

Block storage (consumed by a single pod)

kubectl create -f cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml
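
Once the StorageClass exists, a pod consumes block storage through a PersistentVolumeClaim. A minimal sketch, assuming the StorageClass keeps the name rook-ceph-block from the example manifest:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-rbd-pvc            # placeholder name
spec:
  storageClassName: rook-ceph-block
  accessModes:
    - ReadWriteOnce             # an RBD volume is mounted by a single pod
  resources:
    requests:
      storage: 1Gi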

Shared Filesystem storage (shared by multiple pods)

  • filesystem.yaml
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: myfs
  namespace: rook-ceph
spec:
  metadataPool:
    replicated:
      size: 3
  dataPools:
    - replicated:
        size: 3
  preserveFilesystemOnDelete: true
  metadataServer:
    activeCount: 1
    activeStandby: true

# 1. Create the filesystem
kubectl create -f filesystem.yaml

# 2. Create the CephFS StorageClass
kubectl create -f cluster/examples/kubernetes/ceph/csi/cephfs/storageclass.yaml
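
A shared volume is then claimed the same way, but with ReadWriteMany access. A minimal sketch, assuming the StorageClass keeps the name rook-cephfs from the example manifest:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-cephfs-pvc         # placeholder name
spec:
  storageClassName: rook-cephfs
  accessModes:
    - ReadWriteMany             # a CephFS volume can be mounted by multiple pods
  resources:
    requests:
      storage: 1Gi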

Configuring automatic expansion

So that a newly added disk is discovered automatically:

# Enable automatic disk discovery
# vim operator.yaml
ROOK_ENABLE_DISCOVERY_DAEMON: "true"
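
After editing operator.yaml, re-apply it and check that the discovery daemon pods start. A rough sketch (the app=rook-discover label is the one used by current Rook releases and may differ in yours):

kubectl apply -f operator.yaml
# One discovery pod should appear per node
kubectl -n rook-ceph get pod -l app=rook-discover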

This article uses Kubernetes v1.19; on other versions some apiVersion values may differ.

Notes

ingress-nginx was previously installed with Helm; see 3.2 使用Helm部署Nginx Ingress (Deploying Nginx Ingress with Helm).

However, the http://mirror.azure.cn/kubernetes/charts repository is no longer available, so this article installs it from plain YAML manifests instead.

Create the ServiceAccount

The exact permissions required have not been worked out here, so the roles simply grant everything.

apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress
  namespace: ingress-nginx

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nginx-ingress-normal
rules:
  - apiGroups:
      - '*'
    resources:
      - '*'
    verbs:
      - '*'

---

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: nginx-ingress-minimal
  namespace: ingress-nginx
rules:
  - apiGroups:
      - '*'
    resources:
      - '*'
    verbs:
      - '*'

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: nginx-ingress-minimal
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-minimal
subjects:
  - kind: ServiceAccount
    name: nginx-ingress
    namespace: ingress-nginx

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-normal
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-normal
subjects:
  - kind: ServiceAccount
    name: nginx-ingress
    namespace: ingress-nginx

Create the ConfigMap

apiVersion: v1
kind: ConfigMap
metadata:
  name: tcp-services
  namespace: ingress-nginx
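
The controller reads this ConfigMap through its --tcp-services-configmap argument. To expose a plain TCP service via the controller, add an entry mapping an external port to namespace/service:port; a hypothetical example (the service name and backend port are placeholders; 30107 matches the 30107-tcp port on the controller Service further below):

apiVersion: v1
kind: ConfigMap
metadata:
  name: tcp-services
  namespace: ingress-nginx
data:
  "30107": "default/example-service:8080"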

Create nginx-ingress-default-backend

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-ingress
    chart: nginx-ingress-1.24.5
    component: default-backend
    heritage: Tiller
    release: nginx-ingress
  name: nginx-ingress-default-backend
  namespace: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-ingress
      release: nginx-ingress
  template:
    metadata:
      labels:
        app: nginx-ingress
        component: default-backend
        release: nginx-ingress
    spec:
      serviceAccount: nginx-ingress
      containers:
      - image: hub.deri.org.cn/k8s/defaultbackend-amd64:1.5
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        name: nginx-ingress-default-backend
        ports:
        - containerPort: 8080
          name: http
          protocol: TCP
        readinessProbe:
          failureThreshold: 6
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          periodSeconds: 5
          successThreshold: 1
          timeoutSeconds: 5
        resources: {}
        securityContext:
          runAsUser: 65534
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      nodeSelector:
        node-role.kubernetes.io/edge: ""
      restartPolicy: Always
      schedulerName: default-scheduler
      terminationGracePeriodSeconds: 60
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: PreferNoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists

---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-ingress
    chart: nginx-ingress-1.24.5
    component: default-backend
    heritage: Tiller
    release: nginx-ingress
  name: nginx-ingress-default-backend
  namespace: ingress-nginx
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: http
  selector:
    app: nginx-ingress
    component: default-backend
    release: nginx-ingress
  sessionAffinity: None
  type: ClusterIP

Create nginx-ingress-controller

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-ingress
    chart: nginx-ingress-1.24.5
    component: controller
    heritage: Tiller
    release: nginx-ingress
  name: nginx-ingress-controller
  namespace: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-ingress
      release: nginx-ingress
  template:
    metadata:
      labels:
        app: nginx-ingress
        component: controller
        release: nginx-ingress
    spec:
      serviceAccount: nginx-ingress
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - nginx-ingress
              - key: component
                operator: In
                values:
                - controller
            topologyKey: kubernetes.io/hostname
      containers:
      - args:
        - /nginx-ingress-controller
        - --default-backend-service=ingress-nginx/nginx-ingress-default-backend
        - --election-id=ingress-controller-leader
        - --ingress-class=nginx
        - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
        - --configmap=ingress-nginx/nginx-ingress-controller
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        image: hub.deri.org.cn/k8s/nginx-ingress-controller:0.26.1
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        name: nginx-ingress-controller
        ports:
        - containerPort: 80
          hostPort: 80
          name: http
          protocol: TCP
        - containerPort: 443
          hostPort: 443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources: {}
        securityContext:
          allowPrivilegeEscalation: true
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - ALL
          runAsUser: 33
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      hostNetwork: true
      nodeSelector:
        node-role.kubernetes.io/edge: ""
      restartPolicy: Always
      schedulerName: default-scheduler
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: PreferNoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists

---

apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-ingress
    chart: nginx-ingress-1.24.5
    component: controller
    heritage: Tiller
    release: nginx-ingress
  name: nginx-ingress-controller
  namespace: ingress-nginx
spec:
  externalTrafficPolicy: Cluster
  ports:
  - name: http
    nodePort: 31967
    port: 80
    protocol: TCP
    targetPort: http
  - name: https
    nodePort: 31276
    port: 443
    protocol: TCP
    targetPort: https
  - name: 30107-tcp
    nodePort: 32354
    port: 30107
    protocol: TCP
    targetPort: 30107-tcp
  selector:
    app: nginx-ingress
    component: controller
    release: nginx-ingress
  sessionAffinity: None
  type: LoadBalancer
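
Once all of the manifests above are saved, they can be applied and checked roughly as follows (the file names are placeholders for however the sections above were saved; the ingress-nginx namespace must exist first):

kubectl create namespace ingress-nginx
kubectl apply -f serviceaccount-rbac.yaml -f configmap.yaml \
              -f default-backend.yaml -f controller.yaml
# The controller and default backend pods should end up Running
kubectl -n ingress-nginx get pod,svc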

Plugin overview

When a ConfigMap or Secret mounted by a DeploymentConfig, Deployment, DaemonSet, StatefulSet, etc. changes, the workload can be rolled automatically so it picks up the new values.

Plugin installation

# kubernetes >= 1.9
kubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml

Usage

  • Reload automatically on any change, covering both ConfigMaps and Secrets
kind: Deployment
metadata:
  annotations:
    reloader.stakater.com/auto: "true"
spec:
  template:
    metadata:
  • Match mode
# Enable search on the Deployment
kind: Deployment
metadata:
  annotations:
    reloader.stakater.com/search: "true"
spec:
  template:
# Enable match on the ConfigMap or Secret
kind: ConfigMap
metadata:
  annotations:
    reloader.stakater.com/match: "true"
data:
  key: value
  • Specific ConfigMaps
kind: Deployment
metadata:
  annotations:
    configmap.reloader.stakater.com/reload: "foo-configmap"
spec:
  template:
    metadata:
kind: Deployment
metadata:
  annotations:
    configmap.reloader.stakater.com/reload: "foo-configmap,bar-configmap,baz-configmap"
spec:
  template:
    metadata:
  • Specific Secrets
kind: Deployment
metadata:
  annotations:
    secret.reloader.stakater.com/reload: "foo-secret"
spec:
  template:
    metadata:
kind: Deployment
metadata:
  annotations:
    secret.reloader.stakater.com/reload: "foo-secret,bar-secret,baz-secret"
spec:
  template:
    metadata:

NTP installation

NTP must be installed on both the server and the clients; the firewall must be stopped or the NTP port (UDP 123) opened.

# Install
yum install ntp ntpdate -y

# Enable start on boot
systemctl enable ntpd

NTP server

# Edit the configuration
vim /etc/ntp.conf

# Comment out the default upstream servers and point to the local address
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server 127.0.0.1 iburst

# Restart the service
systemctl restart ntpd
systemctl status ntpd
# Check synchronization status
ntpq -p
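
If the clients run on other machines, the server usually also needs a restrict line that lets them query it. A sketch for the 192.168.3.0/24 network used in the client section below (adjust to your subnet):

# Allow clients in 192.168.3.0/24 to get time from this server without modifying it
restrict 192.168.3.0 mask 255.255.255.0 nomodify notrap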

NTP client

# Edit the configuration
vim /etc/ntp.conf

# Comment out the default upstream servers
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
# Point to the local ntpd server
server 192.168.3.13 iburst
# Restrict the upstream server: it may supply time, but may not modify or query this ntpd
restrict 192.168.3.13 nomodify notrap noquery

# Restart the service
systemctl restart ntpd
systemctl status ntpd
# Check synchronization status
ntpq -p
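
To verify connectivity before ntpd has converged, a one-off manual sync can be run on the client while ntpd is stopped (ntpdate was installed together with ntp above; the IP is the server configured above):

systemctl stop ntpd
ntpdate 192.168.3.13
systemctl start ntpd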

timedatectl

# timedatectl keeps showing NTP synchronized: no
[root@node2 ~]# timedatectl 
      Local time: Mon 2021-07-12 10:27:02 CST
  Universal time: Mon 2021-07-12 02:27:02 UTC
        RTC time: Mon 2021-07-12 02:27:02
       Time zone: Asia/Shanghai (CST, +0800)
     NTP enabled: yes
NTP synchronized: no
 RTC in local TZ: no
      DST active: n/a

# Fix: force a one-time step sync; after a short wait the status changes to yes
systemctl stop ntpd
ntpd -gq
systemctl start ntpd