0%

本文使用kubernetes环境: v1.19,如果不是这个版本,apiVersion可能不一样.

说明

之前通过helm方式安装ingress-nginx,具体参考3.2 使用Helm部署Nginx Ingress.

但是现在http://mirror.azure.cn/kubernetes/charts仓库已经无法使用.本文通过yaml文件安装.

创建serviceaccount

这里没有梳理可能用到的权限,所以直接赋予了全部权限.

# ServiceAccount shared by the default-backend and controller Deployments below
# (both reference it via their pod spec).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress
  namespace: ingress-nginx

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nginx-ingress-normal
rules:
  # The original manifest repeated the same all-wildcard rule six times
  # (apiGroups '*' already covers the core group ""); one rule grants the
  # identical permissions. NOTE(review): this is effectively cluster-admin —
  # as the surrounding text says, permissions were not narrowed down; tighten
  # this for production use.
  - apiGroups:
      - '*'
    resources:
      - '*'
    verbs:
      - '*'

---

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: nginx-ingress-minimal
  namespace: ingress-nginx
rules:
  # The original manifest listed four wildcard rules (one of them with
  # resourceNames: '*', which is subsumed by the unrestricted rule); a single
  # rule grants the identical permissions within the ingress-nginx namespace.
  - apiGroups:
      - '*'
    resources:
      - '*'
    verbs:
      - '*'

---

# Binds the namespaced Role above to the nginx-ingress ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: nginx-ingress-minimal
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-minimal
subjects:
  - kind: ServiceAccount
    name: nginx-ingress
    namespace: ingress-nginx

---

# Binds the cluster-wide ClusterRole to the nginx-ingress ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-normal
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-normal
subjects:
  - kind: ServiceAccount
    name: nginx-ingress
    namespace: ingress-nginx

创建configmap

# Empty ConfigMap referenced by the controller's --tcp-services-configmap flag
# (see the controller Deployment's args). Add data entries here to expose raw
# TCP services through the ingress controller.
apiVersion: v1
kind: ConfigMap
metadata:
  name: tcp-services
  namespace: ingress-nginx

创建nginx-ingress-default-backend

# Default backend: serves 404 / /healthz for requests no Ingress rule matches.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-ingress
    chart: nginx-ingress-1.24.5
    component: default-backend
    heritage: Tiller
    release: nginx-ingress
  name: nginx-ingress-default-backend
  namespace: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-ingress
      release: nginx-ingress
  template:
    metadata:
      labels:
        app: nginx-ingress
        component: default-backend
        release: nginx-ingress
    spec:
      # `serviceAccount` is a deprecated alias; use `serviceAccountName`.
      serviceAccountName: nginx-ingress
      containers:
      - image: hub.deri.org.cn/k8s/defaultbackend-amd64:1.5
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        name: nginx-ingress-default-backend
        ports:
        - containerPort: 8080
          name: http
          protocol: TCP
        readinessProbe:
          failureThreshold: 6
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          periodSeconds: 5
          successThreshold: 1
          timeoutSeconds: 5
        resources: {}
        securityContext:
          runAsUser: 65534  # unprivileged uid ("nobody" on most distros)
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      nodeSelector:
        node-role.kubernetes.io/edge: ""
      restartPolicy: Always
      schedulerName: default-scheduler
      terminationGracePeriodSeconds: 60
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: PreferNoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists

---
# ClusterIP Service in front of the default backend; the controller's
# --default-backend-service flag points at this Service.
# NOTE(review): dropped the exported read-only `status:` stanza — it is
# server-populated and should not be part of an applied manifest.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-ingress
    chart: nginx-ingress-1.24.5
    component: default-backend
    heritage: Tiller
    release: nginx-ingress
  name: nginx-ingress-default-backend
  namespace: ingress-nginx
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: http
  selector:
    app: nginx-ingress
    component: default-backend
    release: nginx-ingress
  sessionAffinity: None
  type: ClusterIP

创建nginx-ingress-controller

# Ingress controller Deployment. Runs on the host network and binds host
# ports 80/443 directly; the pod anti-affinity keeps at most one controller
# pod per node.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-ingress
    chart: nginx-ingress-1.24.5
    component: controller
    heritage: Tiller
    release: nginx-ingress
  name: nginx-ingress-controller
  namespace: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-ingress
      release: nginx-ingress
  template:
    metadata:
      labels:
        app: nginx-ingress
        component: controller
        release: nginx-ingress
    spec:
      # `serviceAccount` is a deprecated alias; use `serviceAccountName`.
      serviceAccountName: nginx-ingress
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - nginx-ingress
              - key: component
                operator: In
                values:
                - controller
            topologyKey: kubernetes.io/hostname
      containers:
      - args:
        - /nginx-ingress-controller
        # $(POD_NAMESPACE) is substituted from the Downward-API env var below;
        # the original mixed hard-coded "ingress-nginx" with $(POD_NAMESPACE) —
        # use the variable consistently so the manifest follows its namespace.
        - --default-backend-service=$(POD_NAMESPACE)/nginx-ingress-default-backend
        - --election-id=ingress-controller-leader
        - --ingress-class=nginx
        - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
        # NOTE(review): no "nginx-ingress-controller" ConfigMap is created in
        # this document — confirm it exists or create it alongside tcp-services.
        - --configmap=$(POD_NAMESPACE)/nginx-ingress-controller
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        image: hub.deri.org.cn/k8s/nginx-ingress-controller:0.26.1
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        name: nginx-ingress-controller
        ports:
        - containerPort: 80
          hostPort: 80
          name: http
          protocol: TCP
        - containerPort: 443
          hostPort: 443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources: {}
        securityContext:
          allowPrivilegeEscalation: true
          capabilities:
            add:
            - NET_BIND_SERVICE  # needed to bind ports 80/443 as uid 33
            drop:
            - ALL
          runAsUser: 33
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      hostNetwork: true
      nodeSelector:
        node-role.kubernetes.io/edge: ""
      restartPolicy: Always
      schedulerName: default-scheduler
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: PreferNoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists

---

# Service in front of the controller pods.
# NOTE(review): dropped the exported read-only `status:` stanza — it is
# server-populated and should not be part of an applied manifest.
# NOTE(review): targetPort "30107-tcp" has no matching containerPort name in
# the controller Deployment above — verify this port mapping before relying
# on it.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-ingress
    chart: nginx-ingress-1.24.5
    component: controller
    heritage: Tiller
    release: nginx-ingress
  name: nginx-ingress-controller
  namespace: ingress-nginx
spec:
  externalTrafficPolicy: Cluster
  ports:
  - name: http
    nodePort: 31967
    port: 80
    protocol: TCP
    targetPort: http
  - name: https
    nodePort: 31276
    port: 443
    protocol: TCP
    targetPort: https
  - name: 30107-tcp
    nodePort: 32354
    port: 30107
    protocol: TCP
    targetPort: 30107-tcp
  selector:
    app: nginx-ingress
    component: controller
    release: nginx-ingress
  sessionAffinity: None
  type: LoadBalancer

插件说明

当我们部署的DeploymentConfig, Deployment, Daemonset, Statefulset等服务所挂载的ConfigMap或Secret发生变化的时候,我们的服务可以自动更新.

插件安装

# kubernetes >= 1.9
kubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml

使用

  • 所有更新自动加载,包括ConfigMap和Secret
kind: Deployment
metadata:
  annotations:
    # Reload this Deployment whenever any ConfigMap/Secret it mounts changes.
    reloader.stakater.com/auto: "true"
spec:
  # `template: metadata:` on one line is invalid YAML; restore the nesting.
  template:
    metadata:
  • 匹配模式
# 开启search
kind: Deployment
metadata:
  annotations:
    reloader.stakater.com/search: "true"
spec:
  template:
# ConfigMap or Secret 开启match
kind: ConfigMap
metadata:
  annotations:
    reloader.stakater.com/match: "true"
data:
  key: value
  • 特定Configmap
kind: Deployment
metadata:
  annotations:
    # Reload only when the named ConfigMap changes.
    configmap.reloader.stakater.com/reload: "foo-configmap"
spec:
  # `template: metadata:` on one line is invalid YAML; restore the nesting.
  template:
    metadata:
kind: Deployment
metadata:
  annotations:
    # Comma-separated list: reload when any of these ConfigMaps changes.
    configmap.reloader.stakater.com/reload: "foo-configmap,bar-configmap,baz-configmap"
spec:
  # `template: metadata:` on one line is invalid YAML; restore the nesting.
  template:
    metadata:
  • 特定secret
kind: Deployment
metadata:
  annotations:
    # Reload only when the named Secret changes.
    secret.reloader.stakater.com/reload: "foo-secret"
spec:
  # `template: metadata:` on one line is invalid YAML; restore the nesting.
  template:
    metadata:
kind: Deployment
metadata:
  annotations:
    # Comma-separated list: reload when any of these Secrets changes.
    secret.reloader.stakater.com/reload: "foo-secret,bar-secret,baz-secret"
spec:
  # `template: metadata:` on one line is invalid YAML; restore the nesting.
  template:
    metadata:

NTP安装

服务端和客户端都需要安装,需关闭防火墙或开通端口

# 安装
yum install ntp ntpdate -y

# 设置开机自启动
systemctl enable ntpd

NTP服务端

# 修改配置
vim /etc/ntp.conf

# 注释默认的,改成本地IP
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server 127.0.0.1 iburst

# 重启服务
systemctl restart ntpd
systemctl status ntpd
# 查看同步情况
ntpq -p

NTP客户端

# 修改配置
vim /etc/ntp.conf

# 注释掉默认的
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
#修改为本地的ntpd Server服务器
server 192.168.3.13 iburst
#配置允许上游时间服务器主动修改本机的时间
restrict 192.168.3.13 nomodify notrap noquery

# 重启服务
systemctl restart ntpd
systemctl status ntpd
# 查看同步情况
ntpq -p

timedatectl

# timedatectl显示synchronized一直处于no状态
[root@node2 ~]# timedatectl 
      Local time: Mon 2021-07-12 10:27:02 CST
  Universal time: Mon 2021-07-12 02:27:02 UTC
        RTC time: Mon 2021-07-12 02:27:02
       Time zone: Asia/Shanghai (CST, +0800)
     NTP enabled: yes
NTP synchronized: no
 RTC in local TZ: no
      DST active: n/a

# 解决办法,等一会儿就好了
systemctl stop ntpd
ntpd -gq
systemctl start ntpd

新增一块存储设备

# 新增完之后可以用以下命令确认
[root@node1 ~]# fdisk -l

磁盘 /dev/sda:53.7 GB, 53687091200 字节,104857600 个扇区
Units = 扇区 of 1 * 512 = 512 bytes
扇区大小(逻辑/物理):512 字节 / 512 字节
I/O 大小(最小/最佳):512 字节 / 512 字节
磁盘标签类型:dos
磁盘标识符:0x0009f53f

   设备 Boot      Start         End      Blocks   Id  System
/dev/sda1   *        2048     2099199     1048576   83  Linux
/dev/sda2         2099200   104857599    51379200   8e  Linux LVM

# /dev/sdb 磁盘下无分区
磁盘 /dev/sdb:16.1 GB, 16106127360 字节,31457280 个扇区
Units = 扇区 of 1 * 512 = 512 bytes
扇区大小(逻辑/物理):512 字节 / 512 字节
I/O 大小(最小/最佳):512 字节 / 512 字节
磁盘标签类型:dos
磁盘标识符:0xefe51d7a
# 使用下面的命令 一样可以确认, sdb设备FSTYPE等信息为空
[root@node1 ~]# lsblk -f
NAME            FSTYPE      LABEL           UUID                                   MOUNTPOINT
sda                                                                                
├─sda1          xfs                         caa5e7a7-d9b3-4bcb-adc2-409153a686ad   /boot
└─sda2          LVM2_member                 VHdTmP-vi9x-LVfT-Tp2s-SAnp-CPlq-4ChakQ 
  ├─centos-root xfs                         e5b9a4e0-95a1-40f1-b7aa-1ce69e51c1d4   /
  └─centos-swap swap                        5d1b3899-2c21-4886-a187-31a644ef32a7   [SWAP]
sdb                                                                                
sr0             iso9660     CentOS 7 x86_64 2018-11-25-23-54-16-00  

为磁盘设置分区

# 1. 进入分区设置
[root@node1 ~]# fdisk /dev/sdb
欢迎使用 fdisk (util-linux 2.23.2)。

更改将停留在内存中,直到您决定将更改写入磁盘。
使用写入命令前请三思。


命令(输入 m 获取帮助):
# 2. 输入n创建一个新的分区
命令(输入 m 获取帮助):n
Partition type:
   p   primary (0 primary, 0 extended, 4 free)
   e   extended
Select (default p): 

# 3. 输入p创建一个主分区, 注:硬盘主分区最多为4个,分区号从1到4,逻辑分区从5开始
分区号 (1-4,默认 1):
# 4. 输入分区起始扇区, 默认开始位置
起始 扇区 (2048-31457279,默认为 2048):
# 5. 输入分区结束扇区,默认结束位置, 可以指定大小, 如 +3G
Last 扇区, +扇区 or +size{K,M,G} (2048-31457279,默认为 31457279):
# 6. 输入p确认分区
命令(输入 m 获取帮助):p

磁盘 /dev/sdb:16.1 GB, 16106127360 字节,31457280 个扇区
Units = 扇区 of 1 * 512 = 512 bytes
扇区大小(逻辑/物理):512 字节 / 512 字节
I/O 大小(最小/最佳):512 字节 / 512 字节
磁盘标签类型:dos
磁盘标识符:0xefe51d7a

   设备 Boot      Start         End      Blocks   Id  System
/dev/sdb1            2048    31457279    15727616   83  Linux

# 7. 输入w保存退出
命令(输入 m 获取帮助):w
The partition table has been altered!

Calling ioctl() to re-read partition table.
正在同步磁盘。
  • 删除分区
# 1. 进入分区设置
[root@node1 ~]# fdisk /dev/sdb
欢迎使用 fdisk (util-linux 2.23.2)。

更改将停留在内存中,直到您决定将更改写入磁盘。
使用写入命令前请三思。


命令(输入 m 获取帮助):
# 2. 输入d,选择分区号删除分区
  • 取消挂载
# 取消挂载点
umount /dev/sdb1

格式化分区

# 1. 格式化:centos7.0开始默认文件系统是xfs, centos6是ext4,centos5是ext3
[root@node1 ~]# mkfs -t xfs /dev/sdb1
mkfs.xfs: /dev/sdb1 appears to contain an existing filesystem (xfs).
mkfs.xfs: Use the -f option to force overwrite.
# 2. 确认格式化结果
[root@node1 ~]# lsblk -f
NAME            FSTYPE      LABEL           UUID                                   MOUNTPOINT
sda                                                                                
├─sda1          xfs                         caa5e7a7-d9b3-4bcb-adc2-409153a686ad   /boot
└─sda2          LVM2_member                 VHdTmP-vi9x-LVfT-Tp2s-SAnp-CPlq-4ChakQ 
  ├─centos-root xfs                         e5b9a4e0-95a1-40f1-b7aa-1ce69e51c1d4   /
  └─centos-swap swap                        5d1b3899-2c21-4886-a187-31a644ef32a7   [SWAP]
sdb                                                                                
└─sdb1          xfs                         9a382567-246a-4c52-9451-fe819a9ee297   
sr0             iso9660     CentOS 7 x86_64 2018-11-25-23-54-16-00                 

挂载分区

# 1. 创建目录
[root@node1 ~]# mkdir /newdisk

# 2. 临时挂载分区,重启后失效
mount /dev/sdb1 /newdisk

# 3. 永久挂载
vim /etc/fstab

# 4. /etc/fstab增加下面一行内容
/dev/sdb1 /newdisk xfs defaults 0 0

# 5. 生效
mount -a

# 6. 确认
[root@node1 ~]# df -h
文件系统                 容量  已用  可用 已用% 挂载点
/dev/sdb1                 15G   33M   15G    1% /newdisk

语法

- 格式: {{ 模板表达式 }}
- 注释格式: {{/* 注释语法 */}}
- {{.字段名}}
- {{.字段名1.字段名2}}

减号

- 在左边增加减号和空格,表示删除左边空格: {{- 模板表达式 }}
- 在右边增加空格和减号,表示删除右边空格: {{ 模板表达式 -}}
- 删除表达式左右两边空格的写法: {{- 模板表达式 -}}

变量

定义变量 (模板中的变量定义也要放在 {{ }} 内):
{{$title := "标题"}}

为变量赋值, 第二次为变量赋值,不需要冒号:
{{$title = "新标题"}}

引用变量
{{$title}}

流程

if

// 语法格式1:表达式为真,则输出T1
{{if 表达式}} T1 {{end}}
// 语法格式2:表达式为真,则输出T1, 否则输出T0
{{if 表达式}} T1 {{else}} T0 {{end}}
// 语法格式3:表达式1为真,则输出T1, 否则如果表达式2为真,则输出T0
{{if 表达式1}} T1 {{else if 表达式2}} T0 {{end}}

range

titles := []string{"标题1", "标题2", "标题3"}
{{range .}}
{{.}}
{{end}}
{{range $index, $element := 数组或者map的引用}}
索引: {{$index}}
元素值: {{$element}}
{{end}}

with

// with语句主要用于struct类型数据的访问
user := User{Id:1001, UserName:"李大成"}
{{with .User}}
Id: {{.Id}}
Username: {{.UserName}}
{{end}}

模板

// 定义
{{define "子模板名字"}}
模板内容
{{end}}
// 引用
{{template "子模板名字" 参数}}

函数

关系运算函数

函数名 函数调用格式 对应关系运算 说明
eq eq arg1 arg2 arg1 == arg2 arg1等于arg2则返回true
ne ne arg1 arg2 arg1 != arg2 arg1不等于arg2则返回true
lt lt arg1 arg2 arg1 < arg2 arg1小于arg2则返回true
le le arg1 arg2 arg1 <= arg2 arg1小于等于arg2则返回true
gt gt arg1 arg2 arg1 > arg2 arg1大于arg2则返回true
ge ge arg1 arg2 arg1 >= arg2 arg1大于等于arg2则返回true

逻辑运算函数

函数名 函数调用格式 对应逻辑运算 说明
and and 表达式1 表达式2 表达式1 && 表达式2 表达式1和表达式2都为真的时候返回true
or or 表达式1 表达式2 表达式1 或者 表达式2 表达式1和表达式2其中一个为真的时候返回true
not not 表达式 !表达式 表达式为false则返回true, 反之返回false

接口说明

springboot集成Prometheus需要开发的接口有:

  • 监控JVM、tomcat等相关的指标;
  • 自定义监控程序相关指标;

监控JVM、tomcat等相关的指标

micrometer已经为我们做好了相关的接口,只需要引入依赖即可.

<!--集成Prometheus-->
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-actuator</artifactId>
</dependency>
<dependency>
    <groupId>io.micrometer</groupId>
    <artifactId>micrometer-registry-prometheus</artifactId>
</dependency>

设置application.yml

server:
  port: 9090

spring:
  application:
    name: application-name

management:
  # The original had a dangling empty `endpoint:` key (parses as null and
  # configures nothing); removed it.
  endpoints:
    web:
      exposure:
        include: '*'
// Adds a common "application" tag (taken from spring.application.name) to every
// metric in the MeterRegistry, so Prometheus series can be filtered per app.
@Bean
MeterRegistryCustomizer<MeterRegistry> configurer(@Value("${spring.application.name}") String applicationName) {
    return registry -> registry.config().commonTags("application", applicationName);
}

启动程序后,访问/actuator/prometheus即可获取相关指标.

使用Micrometer实现方法执行时间监控

// Registers the Micrometer aspect that makes the @Timed annotation work.
@Bean
public TimedAspect timedAspect(MeterRegistry registry) {
    return new TimedAspect(registry);
}
// The containing class must be annotated with @Aspect.
// Then just add @Timed on any method that should be timed.
// NOTE(review): "warn_methord" (typo included) is kept as-is because the metric
// output shown later in this document references that exact name.
@GetMapping("/log/warn")
@Timed(value = "warn_methord",description = "健康检查接口")
public String warn() {
    log.warn("warn msg.");
    return "warn";
}

启动服务后,访问本地的/actuator/prometheus接口,就能看到如下的指标数据了,其中就有我们自定义的warn_methord的三个指标(count sum max)。

# HELP warn_methord_seconds 健康检查接口
# TYPE warn_methord_seconds summary
warn_methord_seconds_count{application="ggis",exception="None",method="GET",status="200",uri="/log/warn",} 3.0
warn_methord_seconds_sum{application="ggis",exception="None",method="GET",status="200",uri="/log/warn",} 0.0208932
# HELP warn_methord_seconds_max 健康检查接口
# TYPE warn_methord_seconds_max gauge
warn_methord_seconds_max{application="ggis",exception="None",method="GET",status="200",uri="/log/warn",} 0.01753

自定义监控程序相关指标

如果上面的接口返回的指标不够用,需要自己开发,可以参考下面的:

// Exposes a hand-rolled custom metric in Prometheus text exposition format.
@GetMapping(value = "/metrics", produces = "text/plain")
@ResponseBody
String metrics() {
    // Random value is a placeholder; replace with the real gauge value as needed.
    return "user_random{application=\"application\"} " + (int)(Math.random()*10);
}

然后配置到Prometheus的Targets中即可.