Containerized MySQL master-slave replication

This project is just a personal experiment.

1. Build the images

# Configure the master's /etc/mysql/my.cnf
[root@k8s-m-01 ~]# mkdir -pv /root/mysql/master
[root@k8s-m-01 ~]# cd /root/mysql/master
[root@k8s-m-01 ~]# vim Dockerfile
FROM mysql:5.7
ADD my.cnf /etc/mysql/my.cnf
[root@k8s-m-01 ~]# vim my.cnf
[mysql]
socket=/var/lib/mysql/mysql.sock

[mysqld]
user=mysql
port=3306
binlog_format=mixed
log_bin=mysql-bin
socket=/var/lib/mysql/mysql.sock
server_id=1
sync_binlog=1
log-slave-updates=on

# Configure the slave's /etc/mysql/my.cnf
[root@k8s-m-01 ~]# mkdir -pv /root/mysql/slave
[root@k8s-m-01 ~]# cd /root/mysql/slave
[root@k8s-m-01 ~]# vim Dockerfile
FROM mysql:5.7
ADD my.cnf /etc/mysql/my.cnf
[root@k8s-m-01 ~]# vim my.cnf
[mysql]
socket=/var/lib/mysql/mysql.sock

[mysqld]
user=mysql
port=3306
binlog_format=mixed
log_bin=mysql-bin
socket=/var/lib/mysql/mysql.sock
server_id=2
read-only=1

# Create the image repositories
Create two repositories, mysql-master and mysql-slave, in the Alibaba Cloud container registry.

# Build the images

# Build the master image
[root@k8s-m-01 ~/mysql/master]# docker build -t registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-master:v1 .
Sending build context to Docker daemon  3.072kB
Step 1/2 : FROM mysql:5.7
 ---> a70d36bc331a
Step 2/2 : ADD my.cnf /etc/mysql/my.cnf
 ---> 4d70fb217ff7
Successfully built 4d70fb217ff7
Successfully tagged registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-master:v1

# Build the slave image
[root@k8s-m-01 ~/mysql/slave]# docker build -t registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-slave:v1 .
Sending build context to Docker daemon  3.072kB
Step 1/2 : FROM mysql:5.7
 ---> a70d36bc331a
Step 2/2 : ADD my.cnf /etc/mysql/my.cnf
 ---> df6a73e92015
Successfully built df6a73e92015
Successfully tagged registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-slave:v1

# Push the images to the registry
[root@k8s-m-01 ~/mysql/slave]#  docker login --username=yangyang091022 registry.cn-hangzhou.aliyuncs.com
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
[root@k8s-m-01 ~/mysql/slave]# docker push registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-master:v1

[root@k8s-m-01 ~/mysql/slave]# docker push registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-slave:v1

2. Configure the master node

# Deploy the master node

# Create the replication user on the master

One open question: how does each instance get its server_id? Add a startup script when building the image:
FROM registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-master:v1

ADD start /root/

WORKDIR /root

CMD "./start"
# Manifest: Namespace, Service, StatefulSet and PVC for the master
kind: Namespace
apiVersion: v1
metadata:
  name: mysql-cluster
---
kind: Service
apiVersion: v1
metadata:
  name: mysql-cluster-master-svc
  namespace: mysql-cluster
spec:
  ports:
    - port: 3306
      targetPort: 3306
      name: mysql-cluster-master-tcp
  selector:
    app: mysql-cluster
    deploy: mysql-cluster-master
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: mysql-cluster-master
  namespace: mysql-cluster
spec:
  selector:
    matchLabels:
      app: mysql-cluster
      deploy: mysql-cluster-master
  serviceName: mysql-cluster-master-svc
  template:
    metadata:
      labels:
        app: mysql-cluster
        deploy: mysql-cluster-master
    spec:
      imagePullSecrets:
        - name: "myregistrykey"
      containers:
        - name: mysql
          image: registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-master:v1
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
          livenessProbe:
            tcpSocket:
              port: 3306
            initialDelaySeconds: 30
            periodSeconds: 2

          readinessProbe:
            tcpSocket:
              port: 3306
            initialDelaySeconds: 30
            periodSeconds: 2

          volumeMounts:
            - mountPath: /var/lib/mysql
              name: mysql-cluster-master-data
      volumes:
        - name: mysql-cluster-master-data
          persistentVolumeClaim:
            claimName: mysql-cluster-master-data-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  namespace: mysql-cluster
  name: mysql-cluster-master-data-pvc
spec:
  storageClassName: nfs-client
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "500Gi"

---
kind: Secret
apiVersion: v1
metadata:
  name: mysql-cluster-master-secret
  namespace: mysql-cluster
data:
  passwd:   # base64-encoded MySQL root password goes here
---
kind: Job
apiVersion: batch/v1
metadata:
  namespace: mysql-cluster
  name: mysql-cluster-master-create-user-job
spec:
  template:
    metadata:
      labels:
        app: mysql-cluster
        deploy: mysql-cluster-master-job
    spec:
      imagePullSecrets:
        - name: "myregistrykey"
      restartPolicy: OnFailure
      containers:
        - name: mysql
          image: registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-master:v1
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
          command:
            - "/bin/bash"
            - "-c"
            - |
              /usr/bin/mysql -hmysql-cluster-master-svc.mysql-cluster.svc.cluster.local -uroot -p$(cat /opt/passwd) -e "GRANT REPLICATION SLAVE ON *.* TO 'mysql'@'%' IDENTIFIED BY 'mysql';FLUSH PRIVILEGES;"

          volumeMounts:
            - mountPath: /opt
              name: mysql-cluster-master-secret
      volumes:
        - name: mysql-cluster-master-secret
          secret:
            secretName: mysql-cluster-master-secret
            items:
              - key: passwd
                path: passwd
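The manifests above only create the replication account on the master; the slave still has to be pointed at the master. A hedged sketch of that step (the host and account follow the manifests above; the binlog file and position are illustrative and would normally come from SHOW MASTER STATUS):

# Inside the master pod: note the current binlog coordinates
/usr/bin/mysql -uroot -p123456 -e "SHOW MASTER STATUS;"

# Inside the slave pod: point it at the master Service and start replication
/usr/bin/mysql -uroot -p123456 -e "
CHANGE MASTER TO
  MASTER_HOST='mysql-cluster-master-svc.mysql-cluster.svc.cluster.local',
  MASTER_PORT=3306,
  MASTER_USER='mysql',
  MASTER_PASSWORD='mysql',
  MASTER_LOG_FILE='mysql-bin.000001',
  MASTER_LOG_POS=154;
START SLAVE;"
/usr/bin/mysql -uroot -p123456 -e "SHOW SLAVE STATUS\G"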

Special volume types

1. emptyDir

2. hostPath

3. pv/pvc

StorageClass

Creates, on demand, a PV that satisfies the requirements of a PVC.

1. PVs are created to match each PVC

2. Reduces wasted resources

1. Deploy the storage class

https://github.com/helm/helm

# Download helm (helm is to Kubernetes roughly what yum is to CentOS)
[root@k8s-m-01 ~]# wget https://get.helm.sh/helm-v3.5.3-linux-amd64.tar.gz

[root@k8s-m-01 ~]# tar -xf helm-v3.5.3-linux-amd64.tar.gz 
[root@k8s-m-01 ~]# cd linux-amd64/
[root@k8s-m-01 ~]# for i in m1 m2 m3;do scp helm root@$i:/usr/local/bin/; done

# Verify the installation
[root@k8s-m-01 ~]# helm 
The Kubernetes package manager

Common actions for Helm:

- helm search:    search for charts
- helm pull:      download a chart to your local directory to view
- helm install:   upload the chart to Kubernetes
- helm list:      list releases of charts

# Install the storage class

## Add a helm chart repository
[root@k8s-m-01 ~]# helm repo add ckotzbauer https://ckotzbauer.github.io/helm-charts
"ckotzbauer" has been added to your repositories
[root@k8s-m-01 ~]# helm repo list
NAME        URL                                     
ckotzbauer  https://ckotzbauer.github.io/helm-charts

## Option 1: deploy the NFS client provisioner and storage class with --set flags
[root@k8s-m-01 ~]# helm install nfs-client --set nfs.server=172.16.1.51 --set nfs.path=/nfs/v6  ckotzbauer/nfs-client-provisioner
NAME: nfs-client
LAST DEPLOYED: Fri Apr  9 09:33:23 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None

## Check the deployment
[root@k8s-m-01 ~]# kubectl get pods 
NAME                                                 READY   STATUS        RESTARTS   AGE
nfs-client-nfs-client-provisioner-56dddf479f-h9qqb   1/1     Running       0          41s

[root@k8s-m-01 ~]# kubectl get storageclasses.storage.k8s.io 
NAME         PROVISIONER                                       RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-client   cluster.local/nfs-client-nfs-client-provisioner   Delete          Immediate           true                   61s

## Option 2: install from the downloaded chart and edit values.yaml (recommended),
##           e.g. set accessModes: ReadWriteMany

### Pull the chart
[root@k8s-m-01 /opt]# helm pull ckotzbauer/nfs-client-provisioner

### Extract it
[root@k8s-m-01 /opt]# tar -xf nfs-client-provisioner-1.0.2.tgz 

### Edit values.yaml
[root@k8s-m-01 /opt]# cd nfs-client-provisioner/
[root@k8s-m-01 /opt/nfs-client-provisioner]# vim values.yaml 
nfs:
  server: 172.16.1.51
  path: /nfs/v6
storageClass:
  accessModes: ReadWriteMany
  reclaimPolicy: Retain

### Install
[root@k8s-m-01 /opt/nfs-client-provisioner]# helm install nfs-client ./
NAME: nfs-client
LAST DEPLOYED: Fri Apr  9 09:45:47 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None

# Test the storage class
## Create a PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-discuz-pvc
spec:
  storageClassName: nfs-client
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "18Gi"

## Check the PV/PVC
[root@k8s-m-01 /opt/discuz]# kubectl get pv,pvc
NAME                                                        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS     CLAIM                      STORAGECLASS   REASON   AGE
persistentvolume/pvc-589b3377-40cf-4f83-ab06-33bbad83013b   18Gi       RWX            Retain           Bound      default/pv-discuz-pvc-sc   nfs-client              2m35s

NAME                                     STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
persistentvolumeclaim/pv-discuz-pvc-sc   Bound    pvc-589b3377-40cf-4f83-ab06-33bbad83013b   18Gi       RWX            nfs-client     2m35s

## Deploy Discuz using the storage class
#########################################################################################
#  1. Deploy the MySQL cluster
#     1. Create the namespace
#     2. Create a Service for load balancing
#     3. Deploy the MySQL instance with a controller
###
#  2. Deploy the Discuz application
#     1. Create the namespace
#     2. Create a Service for load balancing (headless Service)
#     3. Create the workload and mount the code
#     4. Create an Ingress for host-based routing (https)
###
#  3. Connectivity between the services
#     1. Discuz connects to MySQL  --->  mysql-svc.mysql.svc.cluster.local
#########################################################################################
apiVersion: v1
kind: Namespace
metadata:
  name: mysql
---
kind: Service
apiVersion: v1
metadata:
  name: mysql-svc
  namespace: mysql
spec:
  ports:
    - port: 3306
      targetPort: 3306
      name: mysql
      protocol: TCP
  selector:
    app: mysql
    deploy: discuz
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-mysql-pvc
  namespace: mysql
spec:
  storageClassName: nfs-client
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "20Gi"

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql-deployment
  namespace: mysql
spec:
  selector:
    matchLabels:
      app: mysql
      deploy: discuz
  template:
    metadata:
      labels:
        app: mysql
        deploy: discuz
    spec:
      nodeName: k8s-m-02
      containers:
        - name: mysql
          image: mysql:5.7
          livenessProbe:
            tcpSocket:
              port: 3306
          readinessProbe:
            tcpSocket:
              port: 3306
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
            - name: MYSQL_DATABASE
              value: "discuz"
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: mysql-data
      volumes:
        - name: mysql-data
          persistentVolumeClaim:
            claimName: pv-mysql-pvc
---
kind: Namespace
apiVersion: v1
metadata:
  name: discuz
---
kind: Service
apiVersion: v1
metadata:
  name: discuz-svc
  namespace: discuz
spec:
  clusterIP: None
  ports:
    - port: 80
      targetPort: 80
      name: http
  selector:
    app: discuz
    deploy: discuz
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-discuz-pvc
  namespace: discuz
spec:
  storageClassName: nfs-client
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "18Gi"
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: discuz-deployment
  namespace: discuz
spec:
  replicas: 5
  selector:
    matchLabels:
      app: discuz
      deploy: discuz
  template:
    metadata:
      labels:
        app: discuz
        deploy: discuz
    spec:
      nodeName: k8s-m-03
      containers:
        - name: php
          image: alvinos/php:wordpress-v2
          livenessProbe:
            tcpSocket:
              port: 9000
          readinessProbe:
            tcpSocket:
              port: 9000
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: discuz-data
        - name: nginx
          image: alvinos/nginx:wordpress-v2
          livenessProbe:
            httpGet:
              port: 80
              path: /
          readinessProbe:
            httpGet:
              port: 80
              path: /
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: discuz-data
      volumes:
        - name: discuz-data
          persistentVolumeClaim:
            claimName: pv-discuz-pvc
---
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: discuz-ingress
  namespace: discuz
spec:
  tls:
    - hosts:
        - www.discuz.cluster.local.com
      secretName: discuz-secret
  rules:
    - host: www.discuz.cluster.local.com
      http:
        paths:
          - backend:
              serviceName: discuz-svc
              servicePort: 80

3. Configuration center

ConfigMap: the configuration center

# Create a ConfigMap

## Option 1: from a manifest
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
  default.conf: |
    server {
        listen       80;
        listen  [::]:80;
        server_name  _;
        location / {
            root   /usr/share/nginx/html;
            index  index.html index.php;
        }
        location ~ \.php$ {
            root           /usr/share/nginx/html;
            fastcgi_pass   127.0.0.1:9000;
            fastcgi_index  index.php;
            fastcgi_param  SCRIPT_FILENAME  /usr/share/nginx/html$fastcgi_script_name;
            include        fastcgi_params;
        }
    }

## Option 2: from a directory
kubectl create configmap nginx-tls  --from-file=./

## Option 3: from a single file
[root@k8s-m-01 ~/zs/Nginx]# kubectl create configmap nginx-tls-crt  --from-file=tls.crt 

# Use a ConfigMap
## Mount it as a volume so the configuration file appears inside the container
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
  default.conf: |
    server {
        listen       80;
        listen  [::]:80;
        server_name  _;
        location / {
            root   /usr/share/nginx/html;
            index  index.html index.php;
        }
        location ~ \.php$ {
            root           /usr/share/nginx/html;
            fastcgi_pass   127.0.0.1:9000;
            fastcgi_index  index.php;
            fastcgi_param  SCRIPT_FILENAME  /usr/share/nginx/html$fastcgi_script_name;
            include        fastcgi_params;
        }
    }
---
kind: Service
apiVersion: v1
metadata:
  name: nginx-config
spec:
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30089
  selector:
    app: nginx-config
  type: NodePort
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-config
spec:
  storageClassName: nfs-client
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "18Gi"
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nginx-config
spec:
  selector:
    matchLabels:
      app: nginx-config
  template:
    metadata:
      labels:
        app: nginx-config
    spec:
      containers:
        - name: php
          image: alvinos/php:wordpress-v2
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: nginx-config

        - name: nginx
          image: alvinos/nginx:wordpress-v2
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: nginx-config

            - mountPath: /etc/nginx/conf.d
              name: nginx-config-configmap

      volumes:
        - name: nginx-config
          persistentVolumeClaim:
            claimName: nginx-config

        - name: nginx-config-configmap
          configMap:
            name: nginx-config
            items:
              - key: default.conf
                path: default.conf

# ConfigMap hot reload
## Changes to a ConfigMap are synced into every container that mounts it (only the files are updated; the application still has to reload them). If subPath is used, hot reload no longer works.

## Mounting a ConfigMap replaces the whole target directory. To avoid overwriting a directory, mount individual files with subPath (subPath only works for single files and does not support hot reload), as checked in the sketch below.
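A quick way to observe this behaviour, assuming the nginx-config Deployment from the manifest below is running (the pod is looked up by its app=nginx-config label):

[root@k8s-m-01 ~]# kubectl edit configmap nginx-config        # change something in default.conf
[root@k8s-m-01 ~]# POD=$(kubectl get pod -l app=nginx-config -o jsonpath='{.items[0].metadata.name}')
[root@k8s-m-01 ~]# kubectl exec $POD -c nginx -- cat /etc/nginx/conf.d/default.conf   # updated after the kubelet sync period
[root@k8s-m-01 ~]# kubectl exec $POD -c nginx -- nginx -s reload                      # nginx still has to reload the file itself

Files mounted with subPath (index.php in the second manifest) will not be updated this way.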

apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
  default.conf: |
    server {
        listen       80;
        listen  [::]:80;
        server_name  _;
        location / {
            root   /usr/share/nginx/html;
            index  index.html index.php;
        }
        location ~ \.php$ {
            root           /usr/share/nginx/html;
            fastcgi_pass   127.0.0.1:9000;
            fastcgi_index  index.php;
            fastcgi_param  SCRIPT_FILENAME  /usr/share/nginx/html$fastcgi_script_name;
            include        fastcgi_params;
        }
    }
  index.php: |
    <?php

    phpinfo();

    ?>
---
kind: Service
apiVersion: v1
metadata:
  name: nginx-config
spec:
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30089
  selector:
    app: nginx-config
  type: NodePort
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nginx-config
spec:
  selector:
    matchLabels:
      app: nginx-config
  template:
    metadata:
      labels:
        app: nginx-config
    spec:
      containers:
        - name: php
          image: alvinos/php:wordpress-v2
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: nginx-config-configmap

        - name: nginx
          image: alvinos/nginx:wordpress-v2
          volumeMounts:
            - mountPath: /usr/share/nginx/html/index.php
              name: nginx-config-configmap
              subPath: index.php

            - mountPath: /etc/nginx/conf.d
              name: nginx-config-configmap
      volumes:
        - name: nginx-config-configmap
          configMap:
            name: nginx-config
            items:
              - key: index.php
                path: index.php

4. Secret

A Secret stores sensitive data. The values must be base64-encoded before they are saved; when mounted into a Pod they are decoded automatically.

Secret types:

    tls: usually used for certificates
    Opaque: usually used for passwords
    Service Account: Kubernetes API authentication info
    kubernetes.io/dockerconfigjson: container registry login credentials

apiVersion: v1
kind: Secret
metadata:
  name: test
data:
  name: b2xkYm95Cg==
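How the value above can be produced, plus the equivalent imperative commands (a sketch; the trailing Cg== is just a base64-encoded newline from echo):

[root@k8s-m-01 ~]# echo 'oldboy' | base64                                            # -> b2xkYm95Cg==
[root@k8s-m-01 ~]# kubectl create secret generic test --from-literal=name=oldboy     # same Secret without hand-encoding
[root@k8s-m-01 ~]# kubectl get secret test -o jsonpath='{.data.name}' | base64 -d    # decode to verify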

Data persistence

1. Lifecycle hooks

1. PostStart: runs immediately after the container starts

2. PreStop: runs immediately before the container is terminated

kind: Deployment
apiVersion: apps/v1
metadata:
  name: lifecycle
spec:
  selector:
    matchLabels:
      app: cycle
  template:
    metadata:
      labels:
        app: cycle
    spec:
      nodeName: k8s-m-01
      containers:
        - name: nginx
          image: nginx
          volumeMounts:
            - mountPath: /usr/share/nginx/html/
              name: lifecycle-data
          lifecycle:
            postStart:
              exec:
                command:
                  - "/bin/bash"
                  - "-c"
                  - "echo 'This is Lifecycle' > /usr/share/nginx/html/index.html"
            preStop:
              exec:
                command:
                  - "/bin/bash"
                  - "-c"
                  - "echo 'This is Lifecycle preStop' >> /usr/share/nginx/html/index.html"
      volumes:
        - name: lifecycle-data
          hostPath:
            path: /opt/discuz/data
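A quick check that the postStart hook ran (assuming the manifest above has been applied; the Pod IP is a placeholder):

[root@k8s-m-01 ~]# kubectl get pods -l app=cycle -o wide          # note the Pod IP
[root@k8s-m-01 ~]# curl <POD-IP>                                  # expected content: This is Lifecycle
[root@k8s-m-01 ~]# cat /opt/discuz/data/index.html                # same file via the hostPath on k8s-m-01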

2. Data persistence

1. emptyDir: an empty directory created on the node when the Pod is scheduled there; when the Pod is deleted, the data in the emptyDir is deleted with it. emptyDir is commonly used for sharing files between the containers in a Pod or as a temporary directory.

Note: emptyDir cannot be used for data persistence.

kind: Deployment
apiVersion: apps/v1
metadata:
  name: emptydir
spec:
  selector:
    matchLabels:
      app: emptydir
  template:
    metadata:
      labels:
        app: emptydir
    spec:
      containers:
        - name: nginx
          image: nginx
          volumeMounts:
            - mountPath: /usr/share/nginx/nginx
              name: test-emptydir
        - name: mysql
          image: mysql:5.7
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
          volumeMounts:
            - mountPath: /usr/share/nginx
              name: test-emptydir

      volumes:
        - name: test-emptydir
          emptyDir: {}

2. hostPath
    hostPath is similar to docker -v: it mounts a file or directory from the host into the Pod, but it is more flexible than docker -v (whichever node the Pod is scheduled to, the path is mounted from that node).

kind: Deployment
apiVersion: apps/v1
metadata:
  name: hostpath
spec:
  selector:
    matchLabels:
      app: hostpath
  template:
    metadata:
      labels:
        app: hostpath
    spec:
      nodeName: k8s-m-01
      containers:
        - name: nginx
          image: nginx
          volumeMounts:
            - mountPath: /usr/share/nginx/nginx
              name: test-hostpath
        - name: mysql
          image: mysql:5.7
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
          volumeMounts:
            - mountPath: /usr/share/nginx
              name: test-hostpath

      volumes:
        - name: test-hostpath
          hostPath:
            path: /opt/hostpath
            type: DirectoryOrCreate

3. PV/PVC

disk  --->  100G    k8s-m-01
    pv1 ---> 50G
    pv2 ---> 30G
    pv3 ---> 20G

pod   --->  10G     k8s-m-02

    1. Install NFS on all nodes
        yum install nfs-utils.x86_64 -y

    2. Configure the exports
    [root@kubernetes-master-01 nfs]# mkdir -p /nfs/v{1..10}
    [root@kubernetes-master-01 nfs]# cat > /etc/exports <<EOF
    /nfs/v1  172.16.1.0/24(rw,no_root_squash)
    /nfs/v2  172.16.1.0/24(rw,no_root_squash)
    /nfs/v3  172.16.1.0/24(rw,no_root_squash)
    /nfs/v4  172.16.1.0/24(rw,no_root_squash)
    /nfs/v5  172.16.1.0/24(rw,no_root_squash)
    /nfs/v6  172.16.1.0/24(rw,no_root_squash)
    /nfs/v7  172.16.1.0/24(rw,no_root_squash)
    /nfs/v8  172.16.1.0/24(rw,no_root_squash)
    /nfs/v9  172.16.1.0/24(rw,no_root_squash)
    /nfs/v10 172.16.1.0/24(rw,no_root_squash)
    EOF
    [root@k8s-m-01 /nfs]# exportfs -arv
    exporting 172.16.0.0/16:/nfs/v10
    exporting 172.16.0.0/16:/nfs/v9
    exporting 172.16.0.0/16:/nfs/v8
    exporting 172.16.0.0/16:/nfs/v7
    exporting 172.16.0.0/16:/nfs/v6
    exporting 172.16.0.0/16:/nfs/v5
    exporting 172.16.0.0/16:/nfs/v4
    exporting 172.16.0.0/16:/nfs/v3
    exporting 172.16.0.0/16:/nfs/v2
    exporting 172.16.0.0/16:/nfs/v1
    [root@k8s-m-01 /nfs]# showmount -e
    Export list for k8s-m-01:
    /nfs/v10 172.16.0.0/16
    /nfs/v9  172.16.0.0/16
    /nfs/v8  172.16.0.0/16
    /nfs/v7  172.16.0.0/16
    /nfs/v6  172.16.0.0/16
    /nfs/v5  172.16.0.0/16
    /nfs/v4  172.16.0.0/16
    /nfs/v3  172.16.0.0/16
    /nfs/v2  172.16.0.0/16
    /nfs/v1  172.16.0.0/16

    3. Test using NFS from k8s
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs
spec:
  selector:
    matchLabels:
      app: nfs

  template:
    metadata:
      labels:
        app: nfs
    spec:
      nodeName: k8s-m-02
      containers:
        - name: mysql
          image: mysql:5.7
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: nfs
      volumes:
        - name: nfs
          nfs:
            path: /nfs/v1
            server: 172.16.1.51
    4. Manage NFS with PV/PVC
#########################################################################################
#  1. Deploy the MySQL cluster
#     1. Create the namespace
#     2. Create a Service for load balancing
#     3. Deploy the MySQL instance with a controller
###
#  2. Deploy the Discuz application
#     1. Create the namespace
#     2. Create a Service for load balancing (headless Service)
#     3. Create the workload and mount the code
#     4. Create an Ingress for host-based routing (https)
###
#  3. Connectivity between the services
#     1. Discuz connects to MySQL  --->  mysql-svc.mysql.svc.cluster.local
#########################################################################################

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-mysql
spec:
  nfs:
    path: /nfs/v3
    server: 172.16.1.51
  capacity:
    storage: 20Gi
  persistentVolumeReclaimPolicy: Retain
  accessModes:
    - "ReadWriteOnce"
    - "ReadWriteMany"
---
apiVersion: v1
kind: Namespace
metadata:
  name: mysql
---
kind: Service
apiVersion: v1
metadata:
  name: mysql-svc
  namespace: mysql
spec:
  ports:
    - port: 3306
      targetPort: 3306
      name: mysql
      protocol: TCP
  selector:
    app: mysql
    deploy: discuz
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-mysql-pvc
  namespace: mysql
spec:
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "20Gi"

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql-deployment
  namespace: mysql
spec:
  selector:
    matchLabels:
      app: mysql
      deploy: discuz
  template:
    metadata:
      labels:
        app: mysql
        deploy: discuz
    spec:
      nodeName: k8s-m-02
      containers:
        - name: mysql
          image: mysql:5.7
          livenessProbe:
            tcpSocket:
              port: 3306
          readinessProbe:
            tcpSocket:
              port: 3306
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
            - name: MYSQL_DATABASE
              value: "discuz"
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: mysql-data
      volumes:
        - name: mysql-data
          persistentVolumeClaim:
            claimName: pv-mysql-pvc
---
kind: Namespace
apiVersion: v1
metadata:
  name: discuz
---
kind: Service
apiVersion: v1
metadata:
  name: discuz-svc
  namespace: discuz
spec:
  clusterIP: None
  ports:
    - port: 80
      targetPort: 80
      name: http
  selector:
    app: discuz
    deploy: discuz
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-discuz
spec:
  nfs:
    path: /nfs/v4
    server: 172.16.1.51
  capacity:
    storage: 20Gi
  persistentVolumeReclaimPolicy: Retain
  accessModes:
    - "ReadWriteOnce"
    - "ReadWriteMany"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-discuz-pvc
  namespace: discuz
spec:
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "18Gi"
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: discuz-deployment
  namespace: discuz
spec:
  replicas: 5
  selector:
    matchLabels:
      app: discuz
      deploy: discuz
  template:
    metadata:
      labels:
        app: discuz
        deploy: discuz
    spec:
      nodeName: k8s-m-03
      containers:
        - name: php
          image: alvinos/php:wordpress-v2
          livenessProbe:
            tcpSocket:
              port: 9000
          readinessProbe:
            tcpSocket:
              port: 9000
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: discuz-data
        - name: nginx
          image: alvinos/nginx:wordpress-v2
          livenessProbe:
            httpGet:
              port: 80
              path: /
          readinessProbe:
            httpGet:
              port: 80
              path: /
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: discuz-data
      volumes:
        - name: discuz-data
          persistentVolumeClaim:
            claimName: pv-discuz-pvc
---
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: discuz-ingress
  namespace: discuz
spec:
  tls:
    - hosts:
        - www.discuz.cluster.local.com
      secretName: discuz-secret
  rules:
    - host: www.discuz.cluster.local.com
      http:
        paths:
          - backend:
              serviceName: discuz-svc
              servicePort: 80

Health checks

How do we make sure the containers in a Pod start correctly?

How do we make sure a Pod's containers can actually serve traffic?

Only a container that has started and can serve traffic should be put behind the load balancer for users to access.

1. Check whether the containers in a Pod start correctly (liveness probe)

A Pod only reports Running when every container in it has status=Running.

When a liveness probe fails, the kubelet kills the container and starts a new one, then keeps probing.
# exec check
kind: Service
apiVersion: v1
metadata:
  name: name-mysql
spec:
  ports:
    - name: http
      port: 80
      targetPort: 80
  selector:
    app: mysql
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: name-mysql
spec:
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: alvinos/django:v1
          livenessProbe:
            exec:
              command:
                - cat
                - /root/test/manage.py

# httpGet check
kind: Service
apiVersion: v1
metadata:
  name: name-mysql
spec:
  ports:
    - name: http
      port: 80
      targetPort: 80
  selector:
    app: mysql
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: name-mysql
spec:
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: alvinos/django:v1
          livenessProbe:
            httpGet:
              port: 80
              path: /index

# tcpSocket check: roughly equivalent to a ping / TCP port check
---
kind: Service
apiVersion: v1
metadata:
  name: name-mysql
spec:
  ports:
    - name: http
      port: 80
      targetPort: 80
  selector:
    app: mysql
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: name-mysql
spec:
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: alvinos/django:v1
          livenessProbe:
            tcpSocket: 
              port: 80 

1.1 Probe parameters

apiVersion: apps/v1
kind: Deployment
metadata:
  name: name-mysql
spec:
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: alvinos/django:v1
          livenessProbe:
            tcpSocket: 
              port: 80 

# failureThreshold: minimum number of consecutive failed checks, default: 3

delay=10s   : initialDelaySeconds, delay before the first probe
timeout=1s  : probe timeout
period=10s  : probe interval
success=1   : consecutive successes required to count as success
failure=3   : consecutive failures required to count as failure

failureThreshold: minimum number of consecutive probe failures before the check is considered failed
initialDelaySeconds: seconds after the container starts before the first probe; if unset, probing starts immediately
periodSeconds: how often (in seconds) to probe. Default 10, minimum 1.
successThreshold: minimum consecutive successes after a failure for the probe to be considered successful again (must be 1 for liveness probes; minimum value is 1)
timeoutSeconds: timeout for each probe, default 1 second, minimum 1 second.
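The same parameters applied to the Deployment above with a patch (a sketch; the values are illustrative):

[root@k8s-m-01 ~]# kubectl patch deployments.apps name-mysql -p '{"spec":{"template":{"spec":{"containers":[{"name":"mysql","livenessProbe":{"tcpSocket":{"port":80},"initialDelaySeconds":10,"periodSeconds":10,"timeoutSeconds":1,"successThreshold":1,"failureThreshold":3}}]}}}}'
[root@k8s-m-01 ~]# kubectl describe pod -l app=mysql | grep -i liveness    # shows delay/timeout/period/#success/#failure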

2. Check whether the containers in a Pod can serve traffic (readiness probe)

# When a readiness probe fails, the Pod is immediately removed from the load balancer (Endpoints ---> notReadyAddresses); see the check after the manifest below.

---
kind: Service
apiVersion: v1
metadata:
  name: readnessprobe
spec:
  ports:
    - name: http
      port: 80
      targetPort: 80
  selector:
    app: readnessprobe
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: readnessprobe
spec:
  selector:
    matchLabels:
      app: readnessprobe
  template:
    metadata:
      labels:
        app: readnessprobe
    spec:
      containers:
        - name: readnessprobe
          image: alvinos/django:v1
          readinessProbe:
            exec:
              command:
                - cat
                - /root/test/manage.py
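With the manifest above applied, the effect of a failing readiness probe can be seen on the Endpoints object (a sketch):

[root@k8s-m-01 ~]# kubectl get endpoints readnessprobe -o yaml | grep -i -A3 notReadyAddresses
# Pods whose readiness probe is failing are listed under notReadyAddresses and receive no Service traffic;
# once the probe succeeds again they move back to addresses.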

Summary

Liveness probe: on failure, the container is killed and restarted immediately.
Readiness probe: on failure, the Pod is immediately removed from the load balancer.

Ingress Nginx

1. Headless Service

A headless Service is actually a special case of ClusterIP (clusterIP: None).
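A quick way to see the difference, using the headless discuz-svc defined earlier (a sketch; busybox:1.28 ships a working nslookup):

[root@k8s-m-01 ~]# kubectl run -it --rm dns-test --image=busybox:1.28 -- nslookup discuz-svc.discuz.svc.cluster.local
# A headless Service resolves to the Pod IPs directly; a normal ClusterIP Service resolves to its single virtual IP.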

2. A Service's ClusterIP can be set explicitly before the Service is created.

3. Relationship between Service and Pod

service ---> endpoints ---> pod

4. Ingress controllers

nginx ingress: strong performance
traefik: native Kubernetes support
istio: service mesh, traffic governance

5. Ingress Nginx

1. Create an HTTPS certificate
openssl genrsa -out tls.key 2048
openssl req -new -x509 -key tls.key -out tls.crt -subj /C=CN/ST=ShangHai/L=ShangHai/O=Ingress/CN=www.test.com

2. Deploy the certificate
kubectl -n wordpress create secret tls ingress-tls --cert=tls.crt --key=tls.key

3. Create the Ingress
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  tls:
    - secretName: ingress-tls  # secret name
  rules:
    - host: www.test.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress
              servicePort: 80

4. Check the NodePort that the ingress controller exposes for 443
[root@k8s-m-01 ~]# kubectl get svc -n ingress-nginx 
NAME                                 TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx-controller             NodePort    10.96.25.245   <none>        80:52109/TCP,443:45419/TCP   30m

5. Access it from a browser
https://www.test.com:45419/
# Full example
apiVersion: v1
kind: Namespace
metadata:
  name: mysql
---
kind: Service
apiVersion: v1
metadata:
  name: mysql
  namespace: mysql
spec:
  ports:
    - name: http
      port: 3306
      targetPort: 3306
  selector:
    app: mysql
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: name-mysql
  namespace: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: mysql:5.7.33
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: wordpress
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wordpress
  template:
    metadata:
      labels:
        app: wordpress
    spec:
      containers:
        - name: php
          image: alvinos/php:wordpress-v2
        - name: nginx
          image: alvinos/nginx:wordpress-v2

---
apiVersion: v1
kind: Service
metadata:
  name: wordpress
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: wordpress
---
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  tls:
    - secretName: ingress-tls
  rules:
    - host: www.test.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress
              servicePort: 80

1. Kinds of ingress controllers

1. Nginx Ingress

2. Traefik

3. Service mesh: istio

2. Install Ingress

# Download the Ingress Nginx manifest
[root@k8s-m-01 ~]# wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.44.0/deploy/static/provider/baremetal/deploy.yaml

# Swap in a reachable image
[root@k8s-m-01 ~]# sed -i 's#k8s.gcr.io/ingress-nginx/controller:v0.44.0@sha256:3dd0fac48073beaca2d67a78c746c7593f9c575168a17139a9955a82c63c4b9a#registry.cn-hangzhou.aliyuncs.com/k8sos/ingress-controller:v0.44.0#g' deploy.yaml

# Deploy
[root@k8s-m-01 ~]# kubectl apply -f deploy.yaml

# Check
[root@k8s-m-01 ~]# kubectl get pods -n ingress-nginx 
NAME                                        READY   STATUS      RESTARTS   AGE
ingress-nginx-admission-create-g9brk        0/1     Completed   0          3d22h
ingress-nginx-admission-patch-tzlgf         0/1     Completed   0          3d22h
ingress-nginx-controller-8494fd5b55-wpf9g   1/1     Running     0          3d22h

3. Test HTTP

1. Deploy the service (Deployment + Service)

2. Write the ingress manifest (see below)
  • Manifests
Pod + Service manifest
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ingress-test
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx
---
kind: Service
apiVersion: v1
metadata:
  name: wordpress-nginx
spec:
  ports:
    - name: http
      port: 80
      targetPort: 80
  selector:
    app: nginx

Ingress Nginx manifest

kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress-nginx
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
    - host: www.test-nginx.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress-nginx
              servicePort: 80

4. Test HTTPS

1. Create a certificate
[root@k8s-m-01 ~]# openssl genrsa -out tls.key 2048
[root@k8s-m-01 ~]# openssl req -new -x509 -key tls.key -out tls.crt -subj /C=CN/ST=ShangHai/L=ShangHai/O=Ingress/CN=www.test-nginx.com

2. Deploy the certificate
[root@k8s-m-01 ~]# kubectl -n default create secret tls ingress-tls --cert=tls.crt --key=tls.key

3. Write the ingress manifest (see below)

4. Deploy and test
[root@k8s-m-01 ~]# curl -k https://www.test-nginx.com:44490/
  • Manifest


kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress-nginx-tls
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  tls:
    - hosts: 
        - www.test-nginx.com
      secretName: ingress-tls
  rules:
    - host: www.test-nginx.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress-nginx
              servicePort: 80

5. Common nginx ingress annotations

https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#service-upstream

# Rewrite target (rewrite-target; cannot be used to rewrite / itself)
nginx.ingress.kubernetes.io/rewrite-target

kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress-nginx-tls
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/rewrite-target: https://www.baidu.com/s?wd=nginx
spec:
  rules:
    - host: www.test-nginx.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress-nginx
              servicePort: 80

# Set an ingress source-IP whitelist
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress-nginx-tls
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/whitelist-source-range: 192.168.15.53,192.168.15.52
spec:
  rules:
    - host: www.test-nginx.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress-nginx
              servicePort: 80

# Permanent redirect to another domain
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress-nginx-tls
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/permanent-redirect: https://www.baidu.com
spec:
  rules:
    - host: www.test-nginx.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress-nginx
              servicePort: 80

# Match paths with regular expressions (only a limited subset of regex is supported)
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress-nginx-tls
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/rewrite-target: https://www.baidu.com/s?wd=$1
spec:
  rules:
    - host: www.test-nginx.com
      http:
        paths:
          - path: /search/(.+)
            backend:
              serviceName: wordpress-nginx
              servicePort: 80

# Basic authentication (nginx login)
https://kubernetes.github.io/ingress-nginx/examples/auth/basic/

kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress-nginx-tls
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/auth-type: basic
    nginx.ingress.kubernetes.io/auth-secret: basic-auth
    # nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - foo'
spec:
  rules:
    - host: www.test-nginx.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress-nginx
              servicePort: 80

6. Where to apply these nginx settings

There are two ways:
    1. Annotations: apply only to the Ingress they are set on
    2. ConfigMap: apply globally to every Ingress handled by the controller
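A sketch of the ConfigMap approach, assuming the deploy.yaml used earlier (it ships a ConfigMap named ingress-nginx-controller in the ingress-nginx namespace that the controller watches):

[root@k8s-m-01 ~]# kubectl -n ingress-nginx patch configmap ingress-nginx-controller -p '{"data":{"proxy-body-size":"50m"}}'
# The controller reloads nginx with the new value, which then applies to every Ingress it serves.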

4. Controllers and Services

Basic usage

1. Namespaces in k8s

Namespaces isolate cluster resources; k8s resources are either namespace-scoped or cluster-scoped.

# kubectl is the k8s command-line client; it talks to the cluster only through the API server.
## kubectl get [resource] lists cluster resources

# List namespaces
[root@k8s-m-01 ~]# kubectl get namespace
NAME              STATUS   AGE
default           Active   5d16h
kube-node-lease   Active   5d16h
kube-public       Active   5d16h
kube-system       Active   5d16h
[root@k8s-m-01 ~]# kubectl get ns
NAME              STATUS   AGE
default           Active   5d16h
kube-node-lease   Active   5d16h
kube-public       Active   5d16h
kube-system       Active   5d16h

# Note: applications are usually deployed in a namespace of their own

[root@k8s-m-01 ~]# kubectl create namespace wordpress
namespace/wordpress created

2. Labels

# In docker, a TAG = registry URL/namespace/repository:version

In k8s, labels identify (and group) a set of objects, which makes it easy to manage and monitor everything that carries the same label.

apiVersion: v1
kind: Pod
metadata:
  name: test-tag
  labels:
    release: stable
spec:
  containers:
    - name: nginx
      image: nginx

# View labels
[root@k8s-m-01 ~]# kubectl get pod --show-labels

# Add a label
kubectl label pod(resource type) test-tag app=tag

[root@k8s-m-01 ~]# kubectl label pod test-tag app=tag
pod/test-tag labeled
[root@k8s-m-01 ~]# kubectl get pod --show-labels 
NAME                     READY   STATUS             RESTARTS   AGE     LABELS
test-tag                 0/1     ImagePullBackOff   0          2m15s   app=tag,release=stable

# Remove a label
[root@k8s-m-01 ~]# kubectl label pod test-tag app-
pod/test-tag labeled

# Change a label
## Either delete it and add it again, or overwrite it in place (see below)
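A one-step alternative to delete-then-add:

[root@k8s-m-01 ~]# kubectl label pod test-tag app=tag2 --overwrite   # overwrite the existing value in place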

3. Naming rules in k8s

1. Must be lowercase
2. Must start with a letter
3. May contain only letters, digits, and hyphens (-)

4. Common k8s commands

# Get resources
kubectl get [resource type]

# Create resources
kubectl create [resource type] [resource name]
kubectl apply -f [path to manifest]

5. Controllers

The controllers in k8s include: Deployment, DaemonSet, StatefulSet
Deployment: generally used for long-running, stateless applications
    Characteristic: Pods are scheduled anywhere in the cluster
DaemonSet: runs one Pod on every node; removing a node automatically removes its Pod (e.g. zabbix-agent)
    Characteristic: exactly one Pod per node
StatefulSet: deploys stateful applications
    Characteristic: ordered startup

What are controllers for?
    - Managing Pods

Deployment: you describe the desired state in a Deployment object, and the Deployment controller changes the actual state to the desired state at a controlled rate.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      release: stable
  template:
    metadata:
      name: test-tag
      labels:
        release: stable
    spec:
      containers:
        - name: nginx
          image: nginx

# Scaling
1. Edit the manifest
[root@k8s-m-01 ~]# kubectl edit deployments deployment 
deployment.apps/deployment edited

2. Patch
[root@k8s-m-01 ~]# kubectl patch deployments.apps deployment -p '{"spec":{"replicas":40}}'

3. scale
[root@k8s-m-01 ~]# kubectl scale deployment/deployment --replicas=4

# Update
apiVersion: apps/v1
kind: Deployment
metadata:
  name: django
spec:
  replicas: 1
  selector:
    matchLabels:
      app: stable
  template:
    metadata:
      labels:
        app: stable
    spec:
      containers:
        - name: nginx2
          image: nginx:1.17.10
        - name: nginx1
          image: nginx:1.17.10
# Update the image
1. Patch
[root@k8s-m-01 ~]# kubectl patch deployments.apps django -p '{"spec":{"template":{"spec":{"containers":[{"image":"nginx:1.18.0", "name":"nginx1"}]}}}}'

2. Edit the manifest

3. Set the image
[root@k8s-m-01 ~]# kubectl set image deployment/django nginx1=nginx:1.16.0
deployment.apps/django image updated

4. edit
kubectl edit [resource type] [resource name]

# Roll back
[root@k8s-m-01 ~]# kubectl rollout undo deployment django 
deployment.apps/django rolled back

[root@k8s-m-01 ~]# kubectl rollout undo deployment django --to-revision=1
deployment.apps/django rolled back

5. DaemonSet

# Runs exactly one Pod on every node in the cluster

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: zabbix-agent
spec:
  selector:
    matchLabels:
      app: zabbix-agent
  template:
    metadata:
      labels:
        app: zabbix-agent
    spec:
      containers:
        - name: zabbix-agent
          image: zabbix/zabbix-agent:5.2.6-centos

# Update
1. Edit the manifest
[root@k8s-m-01 ~]# kubectl edit daemonsets.apps zabbix-agent 
daemonset.apps/zabbix-agent edited

2. Patch
[root@k8s-m-01 ~]# kubectl patch daemonsets.apps zabbix-agent  -p '{"spec":{"template":{"spec":{"containers":[{"image":"zabbix/zabbix-agent:centos-5.2.4", "name":"zabbix-agent"}]}}}}'
daemonset.apps/zabbix-agent patched

3. Set the image
[root@k8s-m-01 ~]# kubectl set image daemonset/zabbix-agent zabbix-agent=zabbix/zabbix-agent:centos-5.2.3
daemonset.apps/zabbix-agent image updated

# Roll back

## Roll back to the previous version
[root@k8s-m-01 ~]# kubectl rollout undo daemonset zabbix-agent 
daemonset.apps/zabbix-agent rolled back

## Roll back to a specific revision
[root@k8s-m-01 ~]# kubectl rollout undo daemonset zabbix-agent --to-revision=1
daemonset.apps/zabbix-agent rolled back

1. StatefulSet controller

# StatefulSet is mainly used to deploy stateful applications

# A StatefulSet gives its Pods stable network identities and an ordered startup sequence (see the check after the manifest below).

2. Deploy WordPress with a StatefulSet

kind: Service
apiVersion: v1
metadata:
  name: wordpress-test
spec:
  ports:
    - name: http
      port: 80
      targetPort: 80
  selector:
    app: wordpress-test
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: wordpress-test
spec:
  serviceName: wordpress-test
  selector:
    matchLabels:
      app: wordpress-test
  template:
    metadata:
      labels:
        app: wordpress-test
    spec:
      containers:
        - name: php
          image: alvinos/php:wordpress-v2
        - name: nginx
          image: alvinos/nginx:wordpress-v2
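A quick check of the stable, ordered naming mentioned above (assuming the manifest has been applied):

[root@k8s-m-01 ~]# kubectl get pods -l app=wordpress-test
# Pods are named wordpress-test-0, wordpress-test-1, ... and start in order,
# unlike the random suffixes a Deployment generates.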

6. The smart load balancer (Service)

How do we let the outside world reach our services?
How does a Service find the right Pods?

# Which Pods to select
# How to expose the service

apiVersion: v1
kind: Service
metadata:
  name: service
spec:
  selector:
    release: stable
  ports:
    - name: http
      port: 80
      targetPort: 80
      protocol: "TCP"
    - name: https
      port: 443
      targetPort: 443
      protocol: "TCP"

7. The four Service types

# ClusterIP: exposes a virtual IP inside the cluster

# NodePort: opens a port on every host that maps to the Service port, so external clients can reach the in-cluster service through the host's port

# LoadBalancer: another way of exposing a service; it relies on a cloud provider's elastic/public IP

1. ExternalName

ExternalName: gives an external address an alias inside the cluster (a DNS CNAME).

xxx.baidu.com

xxx.aliyun.com

c.abc.com   ----> a.abc.com
c.abc.com   ----> b.abc.com

Project A           --->  Project B
a.abc.com               b.abc.com
curl a.abc.com          curl b.abc.com

apiVersion: v1
kind: Service
metadata:
  name: baidu
spec:
  externalName: www.baidu.com
  type: ExternalName
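What the alias looks like from inside the cluster (a sketch; busybox:1.28 for nslookup):

[root@k8s-m-01 ~]# kubectl run -it --rm dns-test --image=busybox:1.28 -- nslookup baidu.default.svc.cluster.local
# The name resolves as a CNAME to www.baidu.com rather than to a ClusterIP.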

3. More on Services

1. headless service
When is a headless service used?
When you only need to select Pods and do not need a load-balanced virtual IP.

2. Relationship between Service and Pod

service -> endpoints -> pod

[root@k8s-m-01 ~]# kubectl describe service service

4、Ingress

# Ingress is a resource that routes traffic based on host names

## Deploy ingress nginx
1. Download the deployment manifest
wget  https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.44.0/deploy/static/provider/baremetal/deploy.yaml

2. Swap in a reachable image
sed -i 's#k8s.gcr.io/ingress-nginx/controller:v0.44.0@sha256:3dd0fac48073beaca2d67a78c746c7593f9c575168a17139a9955a82c63c4b9a#registry.cn-hangzhou.aliyuncs.com/k8sos/ingress-controller:v0.44.0#g'  deploy.yaml

3. Deploy
[root@k8s-m-01 ~]# kubectl apply -f deploy.yaml 

4. Write the ingress manifest and deploy it
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress
  namespace: default
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
    - host: www.test.com
      http:
        paths:
          - path: /
            backend:
              serviceName: service
              servicePort: 80

5. Add a hosts entry
192.168.15.31  www.test.com

6. Test access by domain name
[root@k8s-m-01 ~]# kubectl get svc -n ingress-nginx 

3. Pod

k8s

k8s is a container orchestration and management platform

Pod

The smallest unit that can be deployed in a k8s cluster. A Pod's main job is to group all the services (containers) that belong to one business function or call chain so they are managed together.

Pod lifecycle

First contact with Pods

# k8s manifests

apiVersion
kind 
metadata
spec
status    # deployment status (filled in by the cluster)

apiVersion: the API version used for the deployment
kind: the resource type (e.g. Pod)
metadata: basic information about the application being deployed
spec: the detailed deployment specification

string  : a plain string value
Object  : a nested object (map of fields)
[]Object: a list of objects, each item starting with
    - name
# kubectl explain Pod
apiVersion: v1
kind: Pod
metadata:
  name: test-pod
spec:
  containers:
    - name: nginx
      image: nginx
    - name: tomcat
      image: tomcat

# Deploy a YAML manifest in k8s: kubectl apply -f [manifest]

ErrImagePull / ImagePullBackOff : image pull failed
ContainerCreating : the container is being created
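When a Pod is stuck in one of these states, the Events section of kubectl describe usually explains why (a sketch):

[root@k8s-m-01 ~]# kubectl describe pod test-pod       # check the Events at the bottom (pull errors, scheduling, probes)
[root@k8s-m-01 ~]# kubectl logs test-pod -c nginx      # container logs, once the container has started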
apiVersion: v1 # required, API version
kind: Pod    # required, resource type Pod
metadata:    # required, metadata
  name: nginx    # required, Pod name conforming to RFC 1035
  namespace: web-testing # optional, namespace the Pod lives in; defaults to default
  labels:    # optional, labels, usually matched by a Selector
    app: nginx
  annotations:    # optional, annotations
    app: nginx
spec:    # required, detailed definition of the containers
  containers:    # required, list of containers
  - name: nginx    # required, container name conforming to RFC 1035
    image: nginx:v1 # required, image used by the container
    imagePullPolicy: Always    # optional, image pull policy
    workingDir: /usr/share/nginx/html    # optional, working directory of the container
    volumeMounts:    # optional, volume mounts
    - name: webroot # volume name
      mountPath: /usr/share/nginx/html # mount path
      readOnly: true    # read-only
    ports:    # optional, ports the container exposes
    - name: http    # port name
      containerPort: 80    # port number
      protocol: TCP    # protocol, TCP by default
    env:    # optional, environment variables
    - name: TZ    # variable name
      value: Asia/Shanghai
    - name: LANG
      value: en_US.utf8
    resources:    # optional, resource limits and requests
      limits:    # upper limits
        cpu: 1000m
        memory: 1024Mi
      requests:    # resources required to start
        cpu: 100m
        memory: 512Mi
    readinessProbe: # optional, readiness check
      httpGet:    # probe type
        path: /    # path to check
        port: 80    # port to check
      timeoutSeconds: 2    # timeout
      initialDelaySeconds: 60    # initial delay
    livenessProbe:    # optional, liveness check (only ONE of exec/httpGet/tcpSocket may be used; all three are listed here for reference)
      exec:    # probe type
        command: 
        - cat
        - /health
      httpGet:    # probe type
        path: /_health
        port: 8080
        httpHeaders:
        - name: end-user
          value: jason
      tcpSocket:    # probe type
        port: 80
      initialDelaySeconds: 60    # initial delay
      timeoutSeconds: 2    # timeout
      periodSeconds: 5    # probe interval
      successThreshold: 2 # 2 consecutive successes count as ready
      failureThreshold: 1 # 1 failure counts as not ready
    securityContext:    # optional, restrict what the container is allowed to do
      privileged: false
  restartPolicy: Always    # optional, defaults to Always
  nodeSelector:    # optional, pin to specific nodes
    region: subnet7
  imagePullSecrets:    # optional, secret used to pull the image
  - name: default-dockercfg-86258
  hostNetwork: false    # optional, host network mode; if true the Pod uses host ports
  volumes:    # list of shared volumes (each volume uses exactly ONE type; the alternatives below are for reference)
  - name: webroot # name, referenced by volumeMounts above
    emptyDir: {}    # volume type: empty directory
    hostPath:        # volume type: directory on the host
      path: /etc/hosts
    secret:    # volume type: secret, usually for passwords
      secretName: default-token-tf2jp # name
      defaultMode: 420 # permissions
    configMap:    # volume type: configMap, usually for configuration files
      name: nginx-conf
      defaultMode: 420

2.2 Node installation

Deploy the Node nodes

Which components does a node need?

kubelet, kube-proxy, flannel

Cluster plan

192.168.15.54  k8s-n-01 n1
192.168.15.55  k8s-n-02 n2

Cluster optimization

# Set up passwordless SSH

Distribute the binaries

[root@k8s-m-01 /opt/data]# for i in n1 n2;do scp flanneld mk-docker-opts.sh kubernetes/server/bin/kubelet kubernetes/server/bin/kube-proxy root@$i:/usr/local/bin; done

Distribute the certificates

[root@k8s-m-01 /opt/data]# for i in n1 n2; do ssh root@$i "mkdir -pv /etc/kubernetes/ssl"; scp -pr /etc/kubernetes/ssl/{ca*.pem,admin*pem,kube-proxy*pem} root@$i:/etc/kubernetes/ssl; done

Distribute the configuration files

# flanneld, the etcd certificates, docker.service

# Distribute the etcd certificates
[root@k8s-m-01 /etc/etcd/ssl]# for i in n1 n2 ;do ssh root@$i "mkdir -pv /etc/etcd/ssl"; scp ./*  root@$i:/etc/etcd/ssl; done

# Distribute the flannel and docker unit files
[root@k8s-m-01 /etc/etcd/ssl]# for i in n1 n2;do scp /usr/lib/systemd/system/docker.service root@$i:/usr/lib/systemd/system/docker.service; scp /usr/lib/systemd/system/flanneld.service root@$i:/usr/lib/systemd/system/flanneld.service; done

Deploy kubelet

[root@k8s-m-01 ~]# for i in n1 n2 ;do 
    ssh root@$i "mkdir -pv  /etc/kubernetes/cfg";
    scp /etc/kubernetes/cfg/kubelet.conf root@$i:/etc/kubernetes/cfg/kubelet.conf; 
    scp /etc/kubernetes/cfg/kubelet-config.yml root@$i:/etc/kubernetes/cfg/kubelet-config.yml; 
    scp /usr/lib/systemd/system/kubelet.service root@$i:/usr/lib/systemd/system/kubelet.service; 
    scp /etc/kubernetes/cfg/kubelet.kubeconfig root@$i:/etc/kubernetes/cfg/kubelet.kubeconfig; 
    scp /etc/kubernetes/cfg/kubelet-bootstrap.kubeconfig root@$i:/etc/kubernetes/cfg/kubelet-bootstrap.kubeconfig; 
    scp /etc/kubernetes/cfg/token.csv root@$i:/etc/kubernetes/cfg/token.csv;
done

# Edit kubelet-config.yml and kubelet.conf (host name and IP) on each node

# Start kubelet
[root@k8s-n-02 ~]# systemctl enable --now kubelet.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

Deploy kube-proxy

[root@k8s-m-01 ~]# for i in n1 n2 ; do 
    scp /etc/kubernetes/cfg/kube-proxy.conf root@$i:/etc/kubernetes/cfg/kube-proxy.conf;  
    scp /etc/kubernetes/cfg/kube-proxy-config.yml root@$i:/etc/kubernetes/cfg/kube-proxy-config.yml ;  
    scp /usr/lib/systemd/system/kube-proxy.service root@$i:/usr/lib/systemd/system/kube-proxy.service;  
    scp /etc/kubernetes/cfg/kube-proxy.kubeconfig root@$i:/etc/kubernetes/cfg/kube-proxy.kubeconfig;

    done
# Edit the IP and host name in kube-proxy-config.yml

# Start kube-proxy
[root@k8s-n-02 ~]# systemctl enable --now kube-proxy.service 

Join the cluster

# Check cluster status
[root@k8s-m-01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}   

# View pending join requests (CSRs)
[root@k8s-m-01 ~]# kubectl get csr
NAME                                                   AGE   SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-_yClVuQCNzDb566yZV5sFJmLsoU13Wba0FOhQ5pmVPY   12m   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
node-csr-m3kFnO7GPBYeBcen5GQ1RdTlt77_rhedLPe97xO_5hw   12m   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending

# Approve the requests
[root@k8s-m-01 ~]# kubectl certificate approve `kubectl get csr | grep "Pending" | awk '{print $1}'`
certificatesigningrequest.certificates.k8s.io/node-csr-_yClVuQCNzDb566yZV5sFJmLsoU13Wba0FOhQ5pmVPY approved
certificatesigningrequest.certificates.k8s.io/node-csr-m3kFnO7GPBYeBcen5GQ1RdTlt77_rhedLPe97xO_5hw approved

# Check the approval status
[root@k8s-m-01 ~]# kubectl get csr
NAME                                                   AGE   SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-_yClVuQCNzDb566yZV5sFJmLsoU13Wba0FOhQ5pmVPY   14m   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
node-csr-m3kFnO7GPBYeBcen5GQ1RdTlt77_rhedLPe97xO_5hw   14m   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued

# View the joined nodes
[root@k8s-m-01 ~]# kubectl get nodes
NAME       STATUS   ROLES    AGE   VERSION
k8s-m-01   Ready    <none>   21h   v1.18.8
k8s-m-02   Ready    <none>   21h   v1.18.8
k8s-m-03   Ready    <none>   21h   v1.18.8
k8s-n-01   Ready    <none>   36s   v1.18.8
k8s-n-02   Ready    <none>   36s   v1.18.8

Set the cluster roles

[root@k8s-m-01 ~]# kubectl label nodes k8s-m-01 node-role.kubernetes.io/master=k8s-m-01
node/k8s-m-01 labeled
[root@k8s-m-01 ~]# kubectl label nodes k8s-m-02 node-role.kubernetes.io/master=k8s-m-02
node/k8s-m-02 labeled
[root@k8s-m-01 ~]# kubectl label nodes k8s-m-03 node-role.kubernetes.io/master=k8s-m-03
node/k8s-m-03 labeled
[root@k8s-m-01 ~]# 
[root@k8s-m-01 ~]# kubectl label nodes k8s-n-01 node-role.kubernetes.io/node=k8s-n-01
node/k8s-n-01 labeled
[root@k8s-m-01 ~]# kubectl label nodes k8s-n-02 node-role.kubernetes.io/node=k8s-n-02
node/k8s-n-02 labeled
[root@k8s-m-01 ~]# kubectl get nodes
NAME       STATUS     ROLES    AGE    VERSION
k8s-m-01   Ready      master   21h    v1.18.8
k8s-m-02   Ready      master   21h    v1.18.8
k8s-m-03   NotReady   master   21h    v1.18.8
k8s-n-01   Ready      node     4m5s   v1.18.8
k8s-n-02   Ready      node     4m5s   v1.18.8

Install the cluster dashboard (web UI)

# https://github.com/kubernetes/dashboard


# Install
[root@k8s-m-01 ~]# kubectl apply -f recommended.yaml 
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created

# Expose a port for access
[root@k8s-m-01 ~]# kubectl edit svc -n kubernetes-dashboard kubernetes-dashboard
type: ClusterIP   =>  type: NodePort

# Check the port after the change
[root@k8s-m-01 ~]# kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.96.86.206    <none>        8000/TCP        3m58s
kubernetes-dashboard        NodePort    10.96.113.185   <none>        443:46491/TCP   3m59s

# Create the token (ServiceAccount) manifest
[root@k8s-m-01 ~]# vim token.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

# Apply the token manifest to the cluster
[root@k8s-m-01 ~]# kubectl apply -f token.yaml 
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created

# Get the token
[root@k8s-m-01 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') | grep token: | awk '{print $2}'


2.1 Master installation

kubernetes

What is the relationship between k8s and docker?

k8s is a container orchestration and management platform; docker is the container runtime that it manages.


Binary deployment

Cluster roles

  • master nodes: manage the cluster
  • node nodes: mainly run application workloads

Components deployed on master nodes

  • kube-apiserver: central manager, schedules and manages the cluster
  • kube-controller-manager: controller, manages and monitors containers
  • kube-scheduler: scheduler, places containers
  • flannel: provides the cluster network
  • etcd: the database
  • kubelet
  • kube-proxy

Components deployed on node nodes

  • kubelet: runs and monitors containers
  • kube-proxy: provides networking between containers

Node plan

192.168.15.51 172.16.1.51 k8s-m-01 m1 
192.168.15.52 172.16.1.52 k8s-m-02 m2
192.168.15.53 172.16.1.53 k8s-m-03 m3
192.168.15.54 172.16.1.54 k8s-n-01 n1 
192.168.15.55 172.16.1.55 k8s-n-02 n2

# Virtual IP (VIP)
192.168.15.56 172.16.1.56 k8s-m-vip vip

Component plan

# Master node components
kube-apiserver
kube-controller-manager
kube-scheduler
flannel
etcd
kubelet
kube-proxy

# Node components
kubelet
kube-proxy

System optimization

# Disable selinux

# Disable the firewall
systemctl disable --now firewalld

# Disable the swap partition
swapoff -a
# comment out the swap line in /etc/fstab
echo 'KUBELET_EXTRA_ARGS="--fail-swap-on=false"' > /etc/sysconfig/kubelet   # make kubelet ignore swap

# Set up passwordless SSH
[root@k8s-m-01 ~]# ssh-keygen -t rsa
[root@k8s-m-01 ~]# for i in m1 n1 n2;do  ssh-copy-id -i ~/.ssh/id_rsa.pub root@$i; done

# Synchronize time across the cluster

# Configure the yum mirror
[root@k8s-m-01 ~]# curl  -o /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo
[root@k8s-m-01 ~]# yum clean all
[root@k8s-m-01 ~]# yum makecache

# Update the system
[root@k8s-m-01 ~]# yum update -y --exclude=kernel*

# Install common base packages
[root@k8s-m-01 ~]# yum install wget expect vim net-tools ntp bash-completion ipvsadm ipset jq iptables conntrack sysstat libseccomp -y

# Upgrade the kernel (docker works best on a newer kernel, ideally 4.4+)
    [root@k8s-m-01 ~]# wget https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-lt-5.4.107-1.el7.elrepo.x86_64.rpm
    [root@k8s-m-01 ~]# wget https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-lt-devel-5.4.107-1.el7.elrepo.x86_64.rpm
    ## Install the kernel packages
    [root@k8s-m-01 ~]# yum localinstall -y kernel-lt*
    ## Make the new kernel the default boot entry
    [root@k8s-m-01 ~]# grub2-set-default  0 && grub2-mkconfig -o /etc/grub2.cfg
    ## Check the current default kernel
    [root@k8s-m-01 ~]# grubby --default-kernel
    ## Reboot
    [root@k8s-m-01 ~]# reboot
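    ## 重启完成后确认当前内核已经换成新安装的 5.4(示例命令)
    [root@k8s-m-01 ~]# uname -r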

# 安装IPVS
    yum install -y conntrack-tools ipvsadm ipset conntrack libseccomp

    ## 加载IPVS模块
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
/sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
if [ $? -eq 0 ]; then
/sbin/modprobe \${kernel_module}
fi
done
EOF

    chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs

# 修改内核启动参数
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

# 立即生效
sysctl --system

安装docker

# 卸载之前安装过的docker
[root@k8s-m-01 ~]# sudo yum remove docker docker-common docker-selinux docker-engine

# 安装docker需要的依赖包
[root@k8s-m-01 ~]# sudo yum install -y yum-utils device-mapper-persistent-data lvm2

# 安装docker的yum源
[root@k8s-m-01 ~]# wget -O /etc/yum.repos.d/docker-ce.repo https://repo.huaweicloud.com/docker-ce/linux/centos/docker-ce.repo

# 安装docker
[root@k8s-m-01 ~]# yum install docker-ce -y

# 设置开机自启动
[root@k8s-m-01 ~]# systemctl enable --now docker.service

集群证书

# 以下命令只需要在master01执行即可

# 安装证书生成工具
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64

# 设置执行权限
chmod +x cfssljson_linux-amd64
chmod +x cfssl_linux-amd64

# 移动到/usr/local/bin
mv cfssljson_linux-amd64 cfssljson
mv cfssl_linux-amd64 cfssl
mv cfssljson cfssl /usr/local/bin
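
# 验证cfssl是否安装成功(示例;能输出版本号即可)
cfssl version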

生成根证书

mkdir -p /opt/cert/ca

cat > /opt/cert/ca/ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
           "expiry": "8760h"
      }
    }
  }
}
EOF

生成根证书请求文件

cat > /opt/cert/ca/ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names":[{
    "C": "CN",
    "ST": "ShangHai",
    "L": "ShangHai"
  }]
}
EOF

生成根证书

[root@k8s-m-01 /opt/cert/ca]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
2021/03/26 17:34:55 [INFO] generating a new CA key and certificate from CSR
2021/03/26 17:34:55 [INFO] generate received request
2021/03/26 17:34:55 [INFO] received CSR
2021/03/26 17:34:55 [INFO] generating key: rsa-2048
2021/03/26 17:34:56 [INFO] encoded CSR
2021/03/26 17:34:56 [INFO] signed certificate with serial number 661764636777400005196465272245416169967628201792
[root@k8s-m-01 /opt/cert/ca]# ll
total 20
-rw-r--r-- 1 root root  285 Mar 26 17:34 ca-config.json
-rw-r--r-- 1 root root  960 Mar 26 17:34 ca.csr
-rw-r--r-- 1 root root  153 Mar 26 17:34 ca-csr.json
-rw------- 1 root root 1675 Mar 26 17:34 ca-key.pem
-rw-r--r-- 1 root root 1281 Mar 26 17:34 ca.pem

部署ETCD集群

节点规划

192.168.15.51 etcd-01
192.168.15.52 etcd-02
192.168.15.53 etcd-03

创建ETCD集群证书

mkdir -p /opt/cert/etcd
cd /opt/cert/etcd

cat > etcd-csr.json << EOF
{
    "CN": "etcd",
    "hosts": [
        "127.0.0.1",
        "192.168.15.51",
        "192.168.15.52",
        "192.168.15.53",
        "192.168.15.54",
        "192.168.15.55",
        "192.168.15.56"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
          "C": "CN",
          "ST": "ShangHai",
          "L": "ShangHai"
        }
    ]
}
EOF
生成ETCD证书
[root@k8s-m-01 /opt/cert/etcd]# cfssl gencert -ca=../ca/ca.pem -ca-key=../ca/ca-key.pem -config=../ca/ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
2021/03/26 17:38:57 [INFO] generate received request
2021/03/26 17:38:57 [INFO] received CSR
2021/03/26 17:38:57 [INFO] generating key: rsa-2048
2021/03/26 17:38:58 [INFO] encoded CSR
2021/03/26 17:38:58 [INFO] signed certificate with serial number 179909685000914921289186132666286329014949215773
2021/03/26 17:38:58 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
分发ETCD证书
[root@k8s-m-01 /opt/cert/etcd]# for ip in m1 m2 m3;do 
   ssh root@${ip} "mkdir -pv /etc/etcd/ssl"
   scp ../ca/ca*.pem  root@${ip}:/etc/etcd/ssl
   scp ./etcd*.pem  root@${ip}:/etc/etcd/ssl   
 done

mkdir: created directory ‘/etc/etcd’
mkdir: created directory ‘/etc/etcd/ssl’
ca-key.pem                                                      100% 1675   299.2KB/s   00:00    
ca.pem                                                          100% 1281   232.3KB/s   00:00    
etcd-key.pem                                                    100% 1675     1.4MB/s   00:00    
etcd.pem                                                        100% 1379   991.0KB/s   00:00    
mkdir: created directory ‘/etc/etcd’
mkdir: created directory ‘/etc/etcd/ssl’
ca-key.pem                                                      100% 1675     1.1MB/s   00:00    
ca.pem                                                          100% 1281   650.8KB/s   00:00    
etcd-key.pem                                                    100% 1675   507.7KB/s   00:00    
etcd.pem                                                        100% 1379   166.7KB/s   00:00    
mkdir: created directory ‘/etc/etcd’
mkdir: created directory ‘/etc/etcd/ssl’
ca-key.pem                                                      100% 1675   109.1KB/s   00:00    
ca.pem                                                          100% 1281   252.9KB/s   00:00    
etcd-key.pem                                                    100% 1675   121.0KB/s   00:00    
etcd.pem                                                        100% 1379   180.4KB/s   00:00    
[root@k8s-m-01 /opt/cert/etcd]# ll /etc/etcd/ssl/
total 16
-rw------- 1 root root 1675 Mar 26 17:41 ca-key.pem
-rw-r--r-- 1 root root 1281 Mar 26 17:41 ca.pem
-rw------- 1 root root 1675 Mar 26 17:41 etcd-key.pem
-rw-r--r-- 1 root root 1379 Mar 26 17:41 etcd.pem

部署ETCD

# 下载ETCD安装包
wget https://mirrors.huaweicloud.com/etcd/v3.3.24/etcd-v3.3.24-linux-amd64.tar.gz

# 解压
tar xf etcd-v3.3.24-linux-amd64.tar.gz

# 分发至其他节点
for i in m1 m2 m3
do
    scp ./etcd-v3.3.24-linux-amd64/etcd* root@$i:/usr/local/bin/
done
[root@k8s-m-01 /opt/etcd-v3.3.24-linux-amd64]# etcd --version
etcd Version: 3.3.24
Git SHA: bdd57848d
Go Version: go1.12.17
Go OS/Arch: linux/amd64

注册ETCD服务

# 在三台master节点上执行
mkdir -pv /etc/kubernetes/conf/etcd

ETCD_NAME=`hostname`
INTERNAL_IP=`hostname -i`
INITIAL_CLUSTER=k8s-m-01=https://192.168.15.51:2380,k8s-m-02=https://192.168.15.52:2380,k8s-m-03=https://192.168.15.53:2380

cat << EOF | sudo tee /usr/lib/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos

[Service]
ExecStart=/usr/local/bin/etcd \\
  --name ${ETCD_NAME} \\
  --cert-file=/etc/etcd/ssl/etcd.pem \\
  --key-file=/etc/etcd/ssl/etcd-key.pem \\
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-client-cert-auth \\
  --client-cert-auth \\
  --initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-client-urls https://${INTERNAL_IP}:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls https://${INTERNAL_IP}:2379 \\
  --initial-cluster-token etcd-cluster \\
  --initial-cluster ${INITIAL_CLUSTER} \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# 启动ETCD服务
systemctl enable --now etcd
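
# 三台master都执行完启动命令后,确认服务状态(示例;etcd需要多数节点在线,只启动一台时命令阻塞属于正常现象)
systemctl status etcd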

测试ETCD服务

# 第一种方式
ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/etcd.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.15.51:2379,https://192.168.15.52:2379,https://192.168.15.53:2379" \
endpoint status --write-out='table'

# 第二种方式
ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/etcd.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.15.51:2379,https://192.168.15.52:2379,https://192.168.15.53:2379" \
member list --write-out='table'
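
# 也可以直接检查各节点的健康状态(示例,证书参数与上面一致)
ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/etcd.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.15.51:2379,https://192.168.15.52:2379,https://192.168.15.53:2379" \
endpoint health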

部署master节点

主要把master节点上的各个组件部署成功。

集群规划

192.168.15.51 172.16.1.51 k8s-m-01 m1 
192.168.15.52 172.16.1.52 k8s-m-02 m2
192.168.15.53 172.16.1.53 k8s-m-03 m3

kube-apiserver、控制器、调度器、flannel、etcd、kubelet、kube-proxy、DNS

创建证书

创建集群证书

创建集群CA证书
# 只需要在master01上执行
[root@kubernetes-master-01 k8s]# mkdir /opt/cert/k8s
[root@kubernetes-master-01 k8s]# cd /opt/cert/k8s
[root@kubernetes-master-01 k8s]# pwd
/opt/cert/k8s
[root@kubernetes-master-01 k8s]# cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF
[root@kubernetes-master-01 k8s]# cat > ca-csr.json << EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "ShangHai",
            "ST": "ShangHai"
        }
    ]
}
EOF
[root@kubernetes-master-01 k8s]# ll
total 8
-rw-r--r-- 1 root root 294 Sep 13 19:59 ca-config.json
-rw-r--r-- 1 root root 212 Sep 13 20:01 ca-csr.json
[root@kubernetes-master-01 k8s]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
2020/09/13 20:01:45 [INFO] generating a new CA key and certificate from CSR
2020/09/13 20:01:45 [INFO] generate received request
2020/09/13 20:01:45 [INFO] received CSR
2020/09/13 20:01:45 [INFO] generating key: rsa-2048
2020/09/13 20:01:46 [INFO] encoded CSR
2020/09/13 20:01:46 [INFO] signed certificate with serial number 588993429584840635805985813644877690042550093427
[root@kubernetes-master-01 k8s]# ll
total 20
-rw-r--r-- 1 root root  294 Sep 13 19:59 ca-config.json
-rw-r--r-- 1 root root  960 Sep 13 20:01 ca.csr
-rw-r--r-- 1 root root  212 Sep 13 20:01 ca-csr.json
-rw------- 1 root root 1679 Sep 13 20:01 ca-key.pem
-rw-r--r-- 1 root root 1273 Sep 13 20:01 ca.pem
创建集群普通证书

创建集群各个组件之间的证书

创建kube-apiserver的证书
[root@k8s-m-01 /opt/cert/k8s]# mkdir /opt/cert/k8s
[root@k8s-m-01 /opt/cert/k8s]# cd /opt/cert/k8s
[root@k8s-m-01 /opt/cert/k8s]# cat > server-csr.json << EOF
{
    "CN": "kubernetes",
    "hosts": [
        "127.0.0.1",
        "192.168.15.51",
        "192.168.15.52",
        "192.168.15.53",
        "192.168.15.54",
        "192.168.15.55",
        "192.168.15.56",
        "10.96.0.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "ShangHai",
            "ST": "ShangHai"
        }
    ]
}
EOF

[root@k8s-m-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
2021/03/29 09:31:02 [INFO] generate received request
2021/03/29 09:31:02 [INFO] received CSR
2021/03/29 09:31:02 [INFO] generating key: rsa-2048
2021/03/29 09:31:02 [INFO] encoded CSR
2021/03/29 09:31:02 [INFO] signed certificate with serial number 475285860832876170844498652484239182294052997083
2021/03/29 09:31:02 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-m-01 /opt/cert/k8s]# ll
total 36
-rw-r--r-- 1 root root  294 Mar 29 09:13 ca-config.json
-rw-r--r-- 1 root root  960 Mar 29 09:16 ca.csr
-rw-r--r-- 1 root root  214 Mar 29 09:14 ca-csr.json
-rw------- 1 root root 1675 Mar 29 09:16 ca-key.pem
-rw-r--r-- 1 root root 1281 Mar 29 09:16 ca.pem
-rw-r--r-- 1 root root 1245 Mar 29 09:31 server.csr
-rw-r--r-- 1 root root  603 Mar 29 09:29 server-csr.json
-rw------- 1 root root 1675 Mar 29 09:31 server-key.pem
-rw-r--r-- 1 root root 1574 Mar 29 09:31 server.pem
创建controller-manager的证书
[root@k8s-m-01 /opt/cert/k8s]# cat > kube-controller-manager-csr.json << EOF
> {
>     "CN": "system:kube-controller-manager",
>     "hosts": [
>         "127.0.0.1",
>         "192.168.15.51",
>         "192.168.15.52",
>         "192.168.15.53",
>         "192.168.15.54",
>         "192.168.15.55",
>         "192.168.15.56"
>     ],
>     "key": {
>         "algo": "rsa",
>         "size": 2048
>     },
>     "names": [
>         {
>             "C": "CN",
>             "L": "ShangHai",
>             "ST": "ShangHai",
>             "O": "system:kube-controller-manager",
>             "OU": "System"
>         }
>     ]
> }
> EOF
[root@k8s-m-01 /opt/cert/k8s]# vim kube-controller-manager-csr.json 
[root@k8s-m-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
2021/03/29 09:33:31 [INFO] generate received request
2021/03/29 09:33:31 [INFO] received CSR
2021/03/29 09:33:31 [INFO] generating key: rsa-2048
2021/03/29 09:33:31 [INFO] encoded CSR
2021/03/29 09:33:31 [INFO] signed certificate with serial number 159207911625502250093013220742142932946474251607
2021/03/29 09:33:31 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
创建kube-scheduler的证书
[root@k8s-m-01 /opt/cert/k8s]# cat > kube-scheduler-csr.json << EOF
> {
>     "CN": "system:kube-scheduler",
>     "hosts": [
>         "127.0.0.1",
>         "192.168.15.51",
>         "192.168.15.52",
>         "192.168.15.53",
>         "192.168.15.54",
>         "192.168.15.55",
>         "192.168.15.56"
>     ],
>     "key": {
>         "algo": "rsa",
>         "size": 2048
>     },
>     "names": [
>         {
>             "C": "CN",
>             "L": "BeiJing",
>             "ST": "BeiJing",
>             "O": "system:kube-scheduler",
>             "OU": "System"
>         }
>     ]
> }
> EOF
[root@k8s-m-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
2021/03/29 09:34:57 [INFO] generate received request
2021/03/29 09:34:57 [INFO] received CSR
2021/03/29 09:34:57 [INFO] generating key: rsa-2048
2021/03/29 09:34:58 [INFO] encoded CSR
2021/03/29 09:34:58 [INFO] signed certificate with serial number 38647006614878532408684142936672497501281226307
2021/03/29 09:34:58 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
创建kube-proxy证书
[root@k8s-m-01 /opt/cert/k8s]# cat > kube-proxy-csr.json << EOF
> {
>     "CN":"system:kube-proxy",
>     "hosts":[],
>     "key":{
>         "algo":"rsa",
>         "size":2048
>     },
>     "names":[
>         {
>             "C":"CN",
>             "L":"BeiJing",
>             "ST":"BeiJing",
>             "O":"system:kube-proxy",
>             "OU":"System"
>         }
>     ]
> }
> EOF
[root@k8s-m-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
2021/03/29 09:37:44 [INFO] generate received request
2021/03/29 09:37:44 [INFO] received CSR
2021/03/29 09:37:44 [INFO] generating key: rsa-2048
2021/03/29 09:37:44 [INFO] encoded CSR
2021/03/29 09:37:44 [INFO] signed certificate with serial number 703321465371340829919693910125364764243453439484
2021/03/29 09:37:44 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
创建集群管理员证书
[root@k8s-m-01 /opt/cert/k8s]# cat > admin-csr.json << EOF
> {
>     "CN":"admin",
>     "key":{
>         "algo":"rsa",
>         "size":2048
>     },
>     "names":[
>         {
>             "C":"CN",
>             "L":"BeiJing",
>             "ST":"BeiJing",
>             "O":"system:masters",
>             "OU":"System"
>         }
>     ]
> }
> EOF
[root@k8s-m-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
2021/03/29 09:36:26 [INFO] generate received request
2021/03/29 09:36:26 [INFO] received CSR
2021/03/29 09:36:26 [INFO] generating key: rsa-2048
2021/03/29 09:36:26 [INFO] encoded CSR
2021/03/29 09:36:26 [INFO] signed certificate with serial number 258862825289855717894394114308507213391711602858
2021/03/29 09:36:26 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
颁发证书
[root@k8s-m-01 /opt/cert/k8s]# mkdir -pv /etc/kubernetes/ssl
[root@k8s-m-01 /opt/cert/k8s]# cp -p ./{ca*pem,server*pem,kube-controller-manager*pem,kube-scheduler*.pem,kube-proxy*pem,admin*.pem} /etc/kubernetes/ssl

[root@k8s-m-01 /opt/cert/k8s]# for i in m1 m2 m3;do
ssh root@$i "mkdir -pv /etc/kubernetes/ssl"
scp /etc/kubernetes/ssl/* root@$i:/etc/kubernetes/ssl
done

编写配置文件以及下载安装包

  • 下载安装包
# 下载安装包
## 下载server安装包
[root@k8s-m-01 /opt/data]# wget https://dl.k8s.io/v1.18.8/kubernetes-server-linux-amd64.tar.gz

## 从容器中复制(上面的地址下载不了时,可以从打包好的镜像里拷贝出来,容器ID以docker ps的实际输出为准)
[root@k8s-m-01 /opt/data]# docker run -dit registry.cn-hangzhou.aliyuncs.com/k8sos/k8s:v1.18.8.1 bash
[root@k8s-m-01 /opt/data]# docker cp `docker ps -lq`:kubernetes-server-linux-amd64.tar.gz .

## 分发组件
[root@k8s-m-01 /opt/data]# tar -xf kubernetes-server-linux-amd64.tar.gz
[root@k8s-m-01 /opt/data]# cd kubernetes/server/bin
[root@k8s-m-01 /opt/data]# for i in m1 m2 m3 ;do  scp kube-apiserver kube-controller-manager kube-proxy kubectl kubelet kube-scheduler root@$i:/usr/local/bin; done
  • 创建集群配置文件

    • 创建kube-controller-manager.kubeconfig
    ## 创建kube-controller-manager.kubeconfig
    export KUBE_APISERVER="https://192.168.15.56:8443"
    
    # 设置集群参数
    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=kube-controller-manager.kubeconfig
    
    # 设置客户端认证参数
    kubectl config set-credentials "kube-controller-manager" \
      --client-certificate=/etc/kubernetes/ssl/kube-controller-manager.pem \
      --client-key=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
      --embed-certs=true \
      --kubeconfig=kube-controller-manager.kubeconfig
    
    # 设置上下文参数(在上下文参数中将集群参数和用户参数关联起来)
    kubectl config set-context default \
      --cluster=kubernetes \
      --user="kube-controller-manager" \
      --kubeconfig=kube-controller-manager.kubeconfig
    
    # 配置默认上下文
    kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
    
    • 创建kube-scheduler.kubeconfig
    # 创建kube-scheduler.kubeconfig
    export KUBE_APISERVER="https://192.168.15.56:8443"
    
    # 设置集群参数
    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=kube-scheduler.kubeconfig
    
    # 设置客户端认证参数
    kubectl config set-credentials "kube-scheduler" \
      --client-certificate=/etc/kubernetes/ssl/kube-scheduler.pem \
      --client-key=/etc/kubernetes/ssl/kube-scheduler-key.pem \
      --embed-certs=true \
      --kubeconfig=kube-scheduler.kubeconfig
    
    # 设置上下文参数(在上下文参数中将集群参数和用户参数关联起来)
    kubectl config set-context default \
      --cluster=kubernetes \
      --user="kube-scheduler" \
      --kubeconfig=kube-scheduler.kubeconfig
    
    # 配置默认上下文
    kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
    
    • 创建kube-proxy.kubeconfig集群配置文件
    ## 创建kube-proxy.kubeconfig集群配置文件
    export KUBE_APISERVER="https://192.168.15.56:8443"
    
    # 设置集群参数
    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=kube-proxy.kubeconfig
    
    # 设置客户端认证参数
    kubectl config set-credentials "kube-proxy" \
      --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
      --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
      --embed-certs=true \
      --kubeconfig=kube-proxy.kubeconfig
    
    # 设置上下文参数(在上下文参数中将集群参数和用户参数关联起来)
    kubectl config set-context default \
      --cluster=kubernetes \
      --user="kube-proxy" \
      --kubeconfig=kube-proxy.kubeconfig
    
    # 配置默认上下文
    kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
    • 创建超级管理员的集群配置文件
    export KUBE_APISERVER="https://192.168.15.56:8443"
    
    # 设置集群参数
    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=admin.kubeconfig
    
    # 设置客户端认证参数
    kubectl config set-credentials "admin" \
      --client-certificate=/etc/kubernetes/ssl/admin.pem \
      --client-key=/etc/kubernetes/ssl/admin-key.pem \
      --embed-certs=true \
      --kubeconfig=admin.kubeconfig
    
    # 设置上下文参数(在上下文参数中将集群参数和用户参数关联起来)
    kubectl config set-context default \
      --cluster=kubernetes \
      --user="admin" \
      --kubeconfig=admin.kubeconfig
    
    # 配置默认上下文
    kubectl config use-context default --kubeconfig=admin.kubeconfig
    • 颁发集群配置文件
    [root@k8s-m-01 /opt/cert/k8s]# for i in m1 m2 m3; do
    > ssh root@$i  "mkdir -pv /etc/kubernetes/cfg"
    > scp ./*.kubeconfig root@$i:/etc/kubernetes/cfg
    > done
    [root@k8s-m-01 /opt/cert/k8s]# ll /etc/kubernetes/cfg/
    total 32
    -rw------- 1 root root 6103 Mar 29 10:32 admin.kubeconfig
    -rw------- 1 root root 6319 Mar 29 10:32 kube-controller-manager.kubeconfig
    -rw------- 1 root root 6141 Mar 29 10:32 kube-proxy.kubeconfig
    -rw------- 1 root root 6261 Mar 29 10:32 kube-scheduler.kubeconfig
    
    • 创建集群token
    # 只需要创建一次
    # 必须要用自己机器创建的Token
    TLS_BOOTSTRAPPING_TOKEN=`head -c 16 /dev/urandom | od -An -t x | tr -d ' '`
    
    cat > token.csv << EOF
    ${TLS_BOOTSTRAPPING_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
    EOF
    
    # 分发集群token,用于集群TLS认证
    [root@k8s-m-01 /opt/cert/k8s]# for i in m1 m2 m3;do
    > scp token.csv root@$i:/etc/kubernetes/cfg/
    > done

部署各个组件

安装各个组件,使其可以正常工作

安装kube-apiserver

创建kube-apiserver的配置文件
# 在所有的master节点上执行
KUBE_APISERVER_IP=`hostname -i`

cat > /etc/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--advertise-address=${KUBE_APISERVER_IP} \\
--default-not-ready-toleration-seconds=360 \\
--default-unreachable-toleration-seconds=360 \\
--max-mutating-requests-inflight=2000 \\
--max-requests-inflight=4000 \\
--default-watch-cache-size=200 \\
--delete-collection-workers=2 \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.96.0.0/16 \\
--service-node-port-range=30000-52767 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/etc/kubernetes/cfg/token.csv \\
--kubelet-client-certificate=/etc/kubernetes/ssl/server.pem \\
--kubelet-client-key=/etc/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/etc/kubernetes/ssl/server.pem  \\
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/kubernetes/k8s-audit.log \\
--etcd-servers=https://192.168.15.51:2379,https://192.168.15.52:2379,https://192.168.15.53:2379 \\
--etcd-cafile=/etc/etcd/ssl/ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem"
EOF
注册kube-apiserver的服务
# 在所有的master节点上执行
[root@k8s-m-01 /opt/cert/k8s]# cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=10
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

[root@k8s-m-01 /opt/cert/k8s]# systemctl daemon-reload
[root@k8s-m-01 /opt/cert/k8s]# systemctl enable --now kube-apiserver.service
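
# 启动后可以先简单验证一下apiserver是否正常(示例;6443是上面配置的secure-port,/healthz默认允许匿名访问,返回ok即正常)
[root@k8s-m-01 /opt/cert/k8s]# curl -k https://127.0.0.1:6443/healthz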
对kube-apiserver做高可用
  • 安装高可用软件

    # 三台master节点都需要安装
    # keeplived + haproxy
    [root@k8s-m-01 ~]# yum install -y keepalived haproxy
  • 修改keepalived配置文件

    # 根据节点的不同,修改的配置也不同
    mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf_bak
    
    cd /etc/keepalived
    
    KUBE_APISERVER_IP=`hostname -i`
    
    # 将keepalived的配置写入 /etc/keepalived/keepalived.conf,配置内容见下面的示例
  • 修改haproxy配置文件

    # 高可用软件
    # 将haproxy的配置写入 /etc/haproxy/haproxy.cfg,配置内容见下面的示例
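
  • keepalived和haproxy配置示例

    下面给出一份参考示例(仅供参考):网卡名eth0、virtual_router_id、priority等参数按自己环境修改;m2、m3节点把state改为BACKUP、priority依次调低(如90、80);VIP即上面规划的192.168.15.56;haproxy监听8443端口并转发到三台master的6443,与上面KUBE_APISERVER的地址对应。

    cat > /etc/keepalived/keepalived.conf << EOF
    ! Configuration File for keepalived
    global_defs {
        router_id LVS_DEVEL
    }
    vrrp_instance VI_1 {
        state MASTER
        interface eth0
        mcast_src_ip ${KUBE_APISERVER_IP}
        virtual_router_id 51
        priority 100
        advert_int 2
        authentication {
            auth_type PASS
            auth_pass K8SHA_KA
        }
        virtual_ipaddress {
            192.168.15.56
        }
    }
    EOF
    
    cat > /etc/haproxy/haproxy.cfg << EOF
    global
        maxconn     2000
        log         127.0.0.1 local0 err
    
    defaults
        log     global
        mode    tcp
        option  tcplog
        timeout connect 5s
        timeout client  50s
        timeout server  50s
    
    frontend k8s-master
        bind 0.0.0.0:8443
        mode tcp
        default_backend k8s-master
    
    backend k8s-master
        mode tcp
        balance roundrobin
        server k8s-m-01 192.168.15.51:6443 check
        server k8s-m-02 192.168.15.52:6443 check
        server k8s-m-03 192.168.15.53:6443 check
    EOF
    
    # 启动并设置开机自启(三台master都执行)
    systemctl enable --now keepalived haproxy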

部署TLS

apiserver 动态签署颁发到Node节点,实现证书签署自动化

创建集群配置文件
# 只需要在一台节点上执行
export KUBE_APISERVER="https://192.168.15.56:8443"

# 设置集群参数
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kubelet-bootstrap.kubeconfig

# 设置客户端认证参数,此处token必须用上述token.csv中的token(下面的值只是示例,换成自己生成的)
kubectl config set-credentials "kubelet-bootstrap" \
  --token=928419f8458958ce0a80ad37fe5d47c1 \
  --kubeconfig=kubelet-bootstrap.kubeconfig

# 设置上下文参数(在上下文参数中将集群参数和用户参数关联起来)
kubectl config set-context default \
  --cluster=kubernetes \
  --user="kubelet-bootstrap" \
  --kubeconfig=kubelet-bootstrap.kubeconfig

# 配置默认上下文
kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig
颁发证书
# 颁发集群配置文件
[root@k8s-m-01 /opt/cert/k8s]# for i in m1 m2 m3; do
> scp kubelet-bootstrap.kubeconfig root@$i:/etc/kubernetes/cfg/
> done
创建TLS低权限用户
# 创建一个低权限用户
[root@k8s-m-01 /opt/cert/k8s]# kubectl create clusterrolebinding kubelet-bootstrap \
> --clusterrole=system:node-bootstrapper \
> --user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created

部署contorller-manager

编辑配置文件
# 需要在三台master节点上执行
cat > /etc/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--leader-elect=true \\
--cluster-name=kubernetes \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/12 \\
--service-cluster-ip-range=10.96.0.0/16 \\
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \\
--root-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--kubeconfig=/etc/kubernetes/cfg/kube-controller-manager.kubeconfig \\
--tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s \\
--controllers=*,bootstrapsigner,tokencleaner \\
--use-service-account-credentials=true \\
--node-monitor-grace-period=10s \\
--horizontal-pod-autoscaler-use-rest-clients=true"
EOF
注册服务
# 需要在三台master节点上执行
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
启动
[root@k8s-m-01 /opt/cert/k8s]# systemctl daemon-reload 
[root@k8s-m-01 /opt/cert/k8s]# systemctl enable --now kube-controller-manager.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.

部署kube-scheduler

编写配置文件
# 三台机器上都需要执行
cat > /etc/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--kubeconfig=/etc/kubernetes/cfg/kube-scheduler.kubeconfig \\
--leader-elect=true \\
--master=http://127.0.0.1:8080 \\
--bind-address=127.0.0.1 "
EOF
注册服务
# 三台节点上都需要执行
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
启动
[root@k8s-m-01 /opt/cert/k8s]# systemctl daemon-reload 
[root@k8s-m-01 /opt/cert/k8s]# systemctl enable --now kube-scheduler.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.

查看集群状态

[root@k8s-m-01 /opt/cert/k8s]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-2               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}   

部署kubelet服务

创建kubelet服务配置文件
# 需要在三台master节点上执行
KUBE_HOSTNAME=`hostname`

cat > /etc/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--hostname-override=${KUBE_HOSTNAME} \\
--container-runtime=docker \\
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/etc/kubernetes/cfg/kubelet-bootstrap.kubeconfig \\
--config=/etc/kubernetes/cfg/kubelet-config.yml \\
--cert-dir=/etc/kubernetes/ssl \\
--image-pull-progress-deadline=15m \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/k8sos/pause:3.2"
EOF
  • 创建kubelet-config.yml

    # 需要在三台master节点上执行
    KUBE_HOSTNAME=`hostname -i`
    
    cat > /etc/kubernetes/cfg/kubelet-config.yml << EOF
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    address: ${KUBE_HOSTNAME}
    port: 10250
    readOnlyPort: 10255
    cgroupDriver: cgroupfs
    clusterDNS:
    - 10.96.0.2
    clusterDomain: cluster.local
    failSwapOn: false
    authentication:
      anonymous:
        enabled: false
      webhook:
        cacheTTL: 2m0s
        enabled: true
      x509:
        clientCAFile: /etc/kubernetes/ssl/ca.pem
    authorization:
      mode: Webhook
      webhook:
        cacheAuthorizedTTL: 5m0s
        cacheUnauthorizedTTL: 30s
    evictionHard:
      imagefs.available: 15%
      memory.available: 100Mi
      nodefs.available: 10%
      nodefs.inodesFree: 5%
    maxOpenFiles: 1000000
    maxPods: 110
    EOF
注册kubelet的服务
# 需要在三台master节点上执行
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kubelet.conf
ExecStart=/usr/local/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
启动
[root@k8s-m-01 /opt/cert/k8s]# systemctl daemon-reload 
[root@k8s-m-01 /opt/cert/k8s]# systemctl enable --now kubelet.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@k8s-m-01 /opt/cert/k8s]# 

部署kube-proxy

创建配置文件
# 需要在三台master节点上执行
cat > /etc/kubernetes/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--config=/etc/kubernetes/cfg/kube-proxy-config.yml"
EOF
  • 创建kube-proxy-config.yml

    # 需要在三台master节点上执行
    KUBE_HOSTNAME=`hostname -i`
    HOSTNAME=`hostname`
    cat > /etc/kubernetes/cfg/kube-proxy-config.yml << EOF
    kind: KubeProxyConfiguration
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    bindAddress: ${KUBE_HOSTNAME}
    healthzBindAddress: ${KUBE_HOSTNAME}:10256
    metricsBindAddress: ${KUBE_HOSTNAME}:10249
    clientConnection:
      burst: 200
      kubeconfig: /etc/kubernetes/cfg/kube-proxy.kubeconfig
      qps: 100
    hostnameOverride: ${HOSTNAME}
    clusterCIDR: 10.96.0.0/16
    enableProfiling: true
    mode: "ipvs"
    kubeProxyIPTablesConfiguration:
      masqueradeAll: false
    kubeProxyIPVSConfiguration:
      scheduler: rr
      excludeCIDRs: []
    EOF
注册服务
# 需要在三台master节点上执行
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-proxy.conf
ExecStart=/usr/local/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
启动
[root@k8s-m-01 /opt/cert/k8s]# 
[root@k8s-m-01 /opt/cert/k8s]# systemctl daemon-reload 
[root@k8s-m-01 /opt/cert/k8s]# systemctl enable --now kube-proxy.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.

加入集群节点

查看集群节点加入请求
# 只需要在一台节点上执行即可
[root@k8s-m-01 /opt/cert/k8s]# kubectl get csr
NAME                                                   AGE    SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-5AWYEWZ0DkF4DzHTOP00M2_Ne6on7XMwvryxbwsh90M   6m3s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
node-csr-8_Rjm9D7z-04h400v_8RDHHCW3UGILeSRhxx-KkIWNI   6m3s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
node-csr-wlHMJiNAkMuPsQPoD6dan8QF4AIlm-x_hVYJt9DukIg   6m2s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
批准加入
# 只需要在一台节点上执行即可
[root@k8s-m-01 /opt/cert/k8s]# kubectl certificate approve `kubectl get csr | grep "Pending" | awk '{print $1}'`
certificatesigningrequest.certificates.k8s.io/node-csr-5AWYEWZ0DkF4DzHTOP00M2_Ne6on7XMwvryxbwsh90M approved
certificatesigningrequest.certificates.k8s.io/node-csr-8_Rjm9D7z-04h400v_8RDHHCW3UGILeSRhxx-KkIWNI approved
certificatesigningrequest.certificates.k8s.io/node-csr-wlHMJiNAkMuPsQPoD6dan8QF4AIlm-x_hVYJt9DukIg approved
[root@k8s-m-01 /opt/cert/k8s]# kubectl get nodes
NAME       STATUS   ROLES    AGE   VERSION
k8s-m-01   Ready    <none>   13s   v1.18.8
k8s-m-02   Ready    <none>   12s   v1.18.8
k8s-m-03   Ready    <none>   12s   v1.18.8
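
# 此时ROLES一列是<none>,可以选择给三台master打上角色标签方便区分(示例命令,标签名可按需调整)
[root@k8s-m-01 /opt/cert/k8s]# kubectl label nodes k8s-m-01 k8s-m-02 k8s-m-03 node-role.kubernetes.io/master=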

安装网络插件

本次选择使用flannel网络插件

下载flannel安装包并安装
# 只需要在一台节点上执行即可
[root@k8s-m-01 /opt/data]# tar -xf flannel-v0.11.0-linux-amd64.tar.gz
[root@k8s-m-01 /opt/data]# for i in m1 m2 m3;do
> scp flanneld mk-docker-opts.sh root@$i:/usr/local/bin/
> done
将flannel配置写入集群数据库
# 只需要在一台节点上执行即可
etcdctl \
--ca-file=/etc/etcd/ssl/ca.pem \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.15.51:2379,https://192.168.15.52:2379,https://192.168.15.53:2379" \
mk /coreos.com/network/config '{"Network":"10.244.0.0/12", "SubnetLen": 21, "Backend": {"Type": "vxlan", "DirectRouting": true}}'
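
# 写入后可以读出来确认一下(示例,证书参数与上面一致)
etcdctl \
--ca-file=/etc/etcd/ssl/ca.pem \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.15.51:2379,https://192.168.15.52:2379,https://192.168.15.53:2379" \
get /coreos.com/network/config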
注册flannel服务
# 需要在三台机器运行
cat > /usr/lib/systemd/system/flanneld.service << EOF
[Unit]
Description=Flanneld address
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
ExecStart=/usr/local/bin/flanneld \\
  -etcd-cafile=/etc/etcd/ssl/ca.pem \\
  -etcd-certfile=/etc/etcd/ssl/etcd.pem \\
  -etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
  -etcd-endpoints=https://192.168.15.51:2379,https://192.168.15.52:2379,https://192.168.15.53:2379 \\
  -etcd-prefix=/coreos.com/network \\
  -ip-masq
ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=always
RestartSec=5
StartLimitInterval=0
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
修改docker启动文件
# 让flannel接管docker网络
sed -i '/ExecStart/s/\(.*\)/#\1/' /usr/lib/systemd/system/docker.service
sed -i '/ExecReload/a ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock' /usr/lib/systemd/system/docker.service
sed -i '/ExecReload/a EnvironmentFile=-/run/flannel/subnet.env' /usr/lib/systemd/system/docker.service
启动
# 先启动flannel,再启动docker
[root@k8s-m-01 ~]# systemctl daemon-reload 
[root@k8s-m-01 ~]# systemctl enable --now flanneld.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
Created symlink from /etc/systemd/system/docker.service.requires/flanneld.service to /usr/lib/systemd/system/flanneld.service.
[root@k8s-m-01 ~]# systemctl restart docker
验证集群网络
# 集群节点互ping对方的flannel网络
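# 示例:先查看本机flannel分配到的网段,再在其他节点上ping这个地址(具体IP以各节点实际输出为准)
ip addr show flannel.1
cat /run/flannel/subnet.env
ping -c 2 <其他节点flannel.1的IP>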

安装集群DNS

# 只需要在一台节点上执行即可
# 下载DNS安装配置文件包
[root@k8s-m-01 ~]# wget https://github.com/coredns/deployment/archive/refs/heads/master.zip
[root@k8s-m-01 ~]# unzip master.zip
[root@k8s-m-01 ~]# cd deployment-master/kubernetes

# 执行部署命令
[root@k8s-m-01 ~/deployment-master/kubernetes]# ./deploy.sh -i 10.96.0.2 -s | kubectl apply -f -

# 验证集群DNS
[root@k8s-m-01 ~/deployment-master/kubernetes]# kubectl get pods -n kube-system
NAME                      READY   STATUS    RESTARTS   AGE
coredns-6ff445f54-m28gw   1/1     Running   0          48s

验证集群

# 绑定一下超管用户(只需要在一台服务器上执行即可)
[root@k8s-m-01 ~/deployment-master/kubernetes]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubernetes
clusterrolebinding.rbac.authorization.k8s.io/cluster-system-anonymous created

# 验证集群DNS和集群网络成功
[root@k8s-m-01 ~/deployment-master/kubernetes]# kubectl run test -it --rm --image=busybox:1.28.3
If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes
Server:    10.96.0.2
Address 1: 10.96.0.2 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local

2.k8s二进制安装

二进制安装k8s

节点名称 IP
k8s-master-01 172.16.1.71
k8s-master-02 172.16.1.72
k8s-master-03 172.16.1.73
k8s-node-01 172.16.1.74
k8s-node-02 172.16.1.75
# 修改IP和主机名

# 关闭防火墙和selinux

# host解析 (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# vim /etc/hosts
----------------------------------------------------------------------
172.16.1.71  k8s-master-01 m1
172.16.1.72  k8s-master-02 m2
172.16.1.73  k8s-master-03 m3
172.16.1.74  k8s-node-01   n1
172.16.1.75  k8s-node-02   n2
----------------------------------------------------------------------

# 关闭swap分区  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# vim /etc/fstab
----------------------------------------------------------------------
# UUID=43e1bca3-991b-4cbf-bf73-e198e975f24e swap                    swap    defaults        0 0
---------------------------------------------------------------------- 

# 设置忽略swap分区  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# echo 'KUBELET_EXTRA_ARGS="--fail-swap-on=false"' > /etc/sysconfig/kubelet 

# 关闭selinux (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# sed -i 's#enforcing#disabled#g' /etc/selinux/config  # 永久关闭
[root@k8s-master-01 ~]# setenforce 0   # 临时关闭

# 刷新缓存 (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# yum makecache 

# 更新系统 (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# yum update -y --exclude=kernel* 

# 更新内核版本
[root@k8s-master-01 ~]# ll   # 上传包
-r-xr-xr-x  1 root root 41857400 2021-01-18 09:20 kernel-lt-4.4.245-1.el7.elrepo.x86_64.rpm
-r-xr-xr-x  1 root root 10731836 2021-01-18 09:19 kernel-lt-devel-4.4.245-1.el7.elrepo.x86_64.rpm

# 做五台机器的免密
[root@k8s-master-01 ~]# ssh-keygen
[root@k8s-master-01 ~]# ssh-copy-id 192.168.13.71
[root@k8s-master-01 ~]# ssh-copy-id 192.168.13.72
[root@k8s-master-01 ~]# ssh-copy-id 192.168.13.73
[root@k8s-master-01 ~]# ssh-copy-id 192.168.13.74
[root@k8s-master-01 ~]# ssh-copy-id 192.168.13.75

# 把/root的文件传给另外两台机器
[root@k8s-master-01 ~]# for i in n1 n2; do scp kernel* $i:/root;done

# 安装  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# yum localinstall -y kernel*

# 更新内核版本  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# yum localinstall -y kernel-lt*   # 安装
[root@k8s-master-01 ~]# grub2-set-default  0 && grub2-mkconfig -o /etc/grub2.cfg  # 设置启动优先级
[root@k8s-master-01 ~]# grubby --default-kernel   # 查看内核版本

# 安装ipvs (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# yum install -y conntrack-tools ipvsadm ipset conntrack libseccomp

# 加载IPVS模块  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
  if [ $? -eq 0 ]; then
    /sbin/modprobe \${kernel_module}
  fi
done
EOF

# 测试是否成功 (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs

# 优化系统内核参数 (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

# 重启  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# reboot

# 查看内核   (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# uname -a

# 安装基础软件  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# yum install wget expect vim net-tools ntp bash-completion ipvsadm ipset jq iptables conntrack sysstat libseccomp -y

# 安装docker  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@k8s-master-01 ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@k8s-master-01 ~]# yum install docker-ce -y   # 安装
[root@k8s-master-01 ~]# sudo mkdir -p /etc/docker
[root@k8s-master-01 ~]#  sudo tee /etc/docker/daemon.json <<-'EOF'    
{
  "registry-mirrors": ["https://8mh75mhz.mirror.aliyuncs.com"]
}
EOF
[root@k8s-master-01 ~]# sudo systemctl daemon-reload ; systemctl restart docker;systemctl enable --now docker.service
[root@k8s-master-01 ~]# docker info  # 测试是否安装上

# 同步时间  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# crontab -e
--------------------------------------------------------------------
* * * * *  /usr/sbin/ntpdate  ntp.aliyun.com  &> /dev/null
--------------------------------------------------------------------
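
# 也可以先手动同步一次(示例)
[root@k8s-master-01 ~]# /usr/sbin/ntpdate ntp.aliyun.com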

安装cfssl证书生成工具

# 下载
[root@k8s-master-01 ~]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@k8s-master-01 ~]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64

# 设置执行权限
[root@k8s-master-01 ~]# chmod +x cfssljson_linux-amd64
[root@k8s-master-01 ~]# chmod +x cfssl_linux-amd64

# 移动到/usr/local/bin
[root@k8s-master-01 ~]# mv cfssljson_linux-amd64 cfssljson
[root@k8s-master-01 ~]# mv cfssl_linux-amd64 cfssl
[root@k8s-master-01 ~]# mv cfssljson cfssl /usr/local/bin

# 验证
[root@k8s-master-01 ~]# cfssl version
Version: 1.2.0
Revision: dev
Runtime: go1.6

创建集群根证书

[root@k8s-master-01 ~]#  mkdir -p /opt/cert/ca
[root@k8s-master-01 ~]# cat > /opt/cert/ca/ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
           "expiry": "8760h"
      }
    }
  }
}
EOF

创建根CA证书签名请求文件

[root@k8s-master-01 ~]# cat > /opt/cert/ca/ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names":[{
    "C": "CN",
    "ST": "ShangHai",
    "L": "ShangHai"
  }]
}
EOF

生成根证书

[root@k8s-master-01 ~]# cd /opt/cert/ca/
[root@k8s-master-01 /opt/cert/ca]# ll
-rw-r--r-- 1 root root 285 2021-01-19 15:28 ca-config.json
-rw-r--r-- 1 root root 153 2021-01-19 15:29 ca-csr.json
[root@k8s-master-01 /opt/cert/ca]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

部署ETCD

[root@k8s-master-01 /opt/cert/ca]# mkdir /opt/data
[root@k8s-master-01 /opt/cert/ca]# cd /opt/data
[root@k8s-master-01 /opt/data]# wget https://mirrors.huaweicloud.com/etcd/v3.3.24/etcd-v3.3.24-linux-amd64.tar.gz
[root@k8s-master-01 /opt/data]# tar xf etcd-v3.3.24-linux-amd64.tar.gz
[root@k8s-master-01 /opt/data]# cd etcd-v3.3.24-linux-amd64/
[root@k8s-master-01 /opt/data/etcd-v3.3.24-linux-amd64]# for i in m1 m2 m3;do scp etcd* $i:/usr/local/bin;done

# 测试
[root@k8s-master-01 /opt/data/etcd-v3.3.24-linux-amd64]# etcd --version
etcd Version: 3.3.24
Git SHA: bdd57848d
Go Version: go1.12.17
Go OS/Arch: linux/amd64

创建ETCD证书

[root@k8s-master-01 ~]# mkdir -p /opt/cert/etcd
[root@k8s-master-01 ~]# cd /opt/cert/etcd
[root@k8s-master-01 /opt/cert/etcd]# cat > etcd-csr.json << EOF
{
    "CN": "etcd",
    "hosts": [
    "127.0.0.1",
    "172.16.1.71",
    "172.16.1.72",
    "172.16.1.73",
    "172.16.1.74",
    "172.16.1.75"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
          "C": "CN",
          "ST": "ShangHai",
          "L": "ShangHai"
        }
    ]
}
EOF
[root@k8s-master-01 /opt/cert/etcd]# ll
-rw-r--r-- 1 root root 335 2021-01-19 15:40 etcd-csr.json

生成证书

[root@k8s-master-01 /opt/cert/etcd]# cfssl gencert -ca=../ca/ca.pem -ca-key=../ca/ca-key.pem -config=../ca/ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
[root@k8s-master-01 /opt/cert/etcd]# ll
总用量 16
-rw-r--r-- 1 root root 1041 2021-01-19 15:41 etcd.csr
-rw-r--r-- 1 root root  335 2021-01-19 15:40 etcd-csr.json
-rw------- 1 root root 1675 2021-01-19 15:41 etcd-key.pem
-rw-r--r-- 1 root root 1371 2021-01-19 15:41 etcd.pem

分发证书

[root@k8s-master-01 /opt/cert/etcd]# for ip in m1 m2 m3 n1 n2;do
  ssh root@${ip} "mkdir -pv /etc/etcd/ssl"
  scp ../ca/ca*.pem  root@${ip}:/etc/etcd/ssl
  scp ./etcd*.pem  root@${ip}:/etc/etcd/ssl
done

注册etcd服务. (master机器都要做,太长不复制)

[root@k8s-master-01 /opt/cert/etcd]# cd
[root@k8s-master-01 ~]# ETCD_NAME=`hostname`
[root@k8s-master-01 ~]# INTERNAL_IP=`hostname -i`
[root@k8s-master-01 ~]# INITIAL_CLUSTER=k8s-master-01=https://172.16.1.71:2380,k8s-master-02=https://172.16.1.72:2380,k8s-master-03=https://172.16.1.73:2380
[root@k8s-master-01 ~]# cat << EOF | sudo tee /usr/lib/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos

[Service]
ExecStart=/usr/local/bin/etcd \\
  --name ${ETCD_NAME} \\
  --cert-file=/etc/etcd/ssl/etcd.pem \\
  --key-file=/etc/etcd/ssl/etcd-key.pem \\
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-client-cert-auth \\
  --client-cert-auth \\
  --initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-client-urls https://${INTERNAL_IP}:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls https://${INTERNAL_IP}:2379 \\
  --initial-cluster-token etcd-cluster \\
  --initial-cluster ${INITIAL_CLUSTER} \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

启动etcd (master机器都要做,太长不复制)

[root@k8s-master-01 ~]# systemctl enable --now etcd
[root@k8s-master-01 ~]# systemctl status etcd

如果启动失败,可以用 journalctl -xe 查看日志排查

测试ETCD集群

[root@k8s-master-01 ~]# ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/etcd.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379" \
endpoint status --write-out='table'
+--------------------------+------------------+---------+---------+-----------+-----------+------------+
|         ENDPOINT         |        ID        | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
+--------------------------+------------------+---------+---------+-----------+-----------+------------+
| https://172.16.1.71:2379 | 80d0ace027643b4e |  3.3.24 |   20 kB |      true |         7 |          9 |
| https://172.16.1.72:2379 | 9a7cf2dc57ec669f |  3.3.24 |   20 kB |     false |         7 |          9 |
| https://172.16.1.73:2379 | 54f8db1a175b9c73 |  3.3.24 |   20 kB |     false |         7 |          9 |
+--------------------------+------------------+---------+---------+-----------+-----------+------------+

创建master CA节点证书

[root@k8s-master-01 ~]# mkdir /opt/cert/k8s
[root@k8s-master-01 ~]# cd /opt/cert/k8s/
[root@k8s-master-01 /opt/cert/k8s]# cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

[root@k8s-master-01 /opt/cert/k8s]# cat > ca-csr.json << EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "ShangHai",
            "ST": "ShangHai"
        }
    ]
}
EOF

[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

创建kube-apiserver证书

[root@k8s-master-01 /opt/cert/k8s]# cat > server-csr.json << EOF
{
    "CN": "kubernetes",
    "hosts": [
        "127.0.0.1",
        "172.16.1.71",
        "172.16.1.72",
        "172.16.1.73",
        "172.16.1.80",
        "10.96.0.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "ShangHai",
            "ST": "ShangHai"
        }
    ]
}
EOF

[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

创建kube-controller-manager证书

[root@k8s-master-01 /opt/cert/k8s]# cat > kube-controller-manager-csr.json << EOF
{
    "CN": "system:kube-controller-manager",
    "hosts": [
        "127.0.0.1",
        "172.16.1.71",
        "172.16.1.72",
        "172.16.1.73",
        "172.16.1.80"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "system:kube-controller-manager",
            "OU": "System"
        }
    ]
}
EOF

[root@k8s-master-01 /opt/cert/k8s]#  cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

创建kube-scheduler证书

[root@k8s-master-01 /opt/cert/k8s]# cat > kube-scheduler-csr.json << EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
        "127.0.0.1",
        "172.16.1.71",
        "172.16.1.72",
        "172.16.1.73",
        "172.16.1.80"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "system:kube-scheduler",
            "OU": "System"
        }
    ]
}
EOF

[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

创建kube-proxy证书

[root@k8s-master-01 /opt/cert/k8s]# cat > kube-proxy-csr.json << EOF
{
    "CN":"system:kube-proxy",
    "hosts":[],
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "L":"BeiJing",
            "ST":"BeiJing",
            "O":"system:kube-proxy",
            "OU":"System"
        }
    ]
}
EOF

[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

签发管理员用户证书

[root@k8s-master-01 /opt/cert/k8s]#  cat > admin-csr.json << EOF
{
    "CN":"admin",
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "L":"BeiJing",
            "ST":"BeiJing",
            "O":"system:masters",
            "OU":"System"
        }
    ]
}
EOF

[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

颁发证书

[root@k8s-master-01 /opt/cert/k8s]# mkdir /etc/kubernetes/ssl
[root@k8s-master-01 /opt/cert/k8s]# cp -p ./{ca*pem,server*pem,kube-controller-manager*pem,kube-scheduler*.pem,kube-proxy*pem,admin*.pem} /etc/kubernetes/ssl
[root@k8s-master-01 /opt/cert/k8s]# ll /etc/kubernetes/ssl/
总用量 48
-rw------- 1 root root 1679 2021-01-19 16:11 admin-key.pem
-rw-r--r-- 1 root root 1363 2021-01-19 16:11 admin.pem
-rw------- 1 root root 1675 2021-01-19 16:04 ca-key.pem
-rw-r--r-- 1 root root 1281 2021-01-19 16:04 ca.pem
-rw------- 1 root root 1679 2021-01-19 16:06 kube-controller-manager-key.pem
-rw-r--r-- 1 root root 1476 2021-01-19 16:06 kube-controller-manager.pem
-rw------- 1 root root 1679 2021-01-19 16:07 kube-proxy-key.pem
-rw-r--r-- 1 root root 1383 2021-01-19 16:07 kube-proxy.pem
-rw------- 1 root root 1679 2021-01-19 16:06 kube-scheduler-key.pem
-rw-r--r-- 1 root root 1452 2021-01-19 16:06 kube-scheduler.pem
-rw------- 1 root root 1679 2021-01-19 16:05 server-key.pem
-rw-r--r-- 1 root root 1558 2021-01-19 16:05 server.pem
[root@k8s-master-01 /opt/cert/k8s]# for i in m2 m3 ;do
ssh root@$i "mkdir -pv /etc/kubernetes/ssl"
scp /etc/kubernetes/ssl/* root@$i:/etc/kubernetes/ssl
done

下载安装包

[root@k8s-master-01 ~]#  cd /opt/data

# 下载server安装包(应该下载不了)
[root@k8s-master-01 /opt/data]# wget https://dl.k8s.io/v1.18.8/kubernetes-server-linux-amd64.tar.gz

[root@k8s-master-01 /opt/data]# docker run -dit registry.cn-hangzhou.aliyuncs.com/k8sos/k8s:v1.18.8.1 bash
[root@k8s-master-01 /opt/data]# docker ps  (看ID)
[root@k8s-master-01 /opt/data]# docker exec fdeed1e0b5a1 ls
[root@k8s-master-01 /opt/data]# docker cp fdeed1e0b5a1:kubernetes-server-linux-amd64.tar.gz .
[root@k8s-master-01 /opt/data]# tar xf kubernetes-server-linux-amd64.tar.gz
[root@k8s-master-01 /opt/data]# cd kubernetes/server/bin/
[root@k8s-master-01 /opt/data/kubernetes/server/bin]# for i in m1 m2 m3;do scp kube-apiserver kube-controller-manager kubectl kubelet  kube-proxy kube-scheduler $i:/usr/local/bin ; done

# 测试(三台master机器都要测试)
[root@k8s-master-01 /opt/data/kubernetes/server/bin]# kube-apiserver --version
Kubernetes v1.18.8

创建kube-controller-manager集群配置文件

[root@k8s-master-01 /opt/data/kubernetes/server/bin]# cd /opt/cert/k8s/

# 设置集群参数
[root@k8s-master-01 /opt/cert/k8s]# export KUBE_APISERVER="https://172.16.1.80:8443"
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-controller-manager.kubeconfig

# Set the client authentication parameters
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-credentials "kube-controller-manager" \
  --client-certificate=/etc/kubernetes/ssl/kube-controller-manager.pem \
  --client-key=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.kubeconfig

# Set the context parameters (the context ties the cluster and user parameters together)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-context default \
  --cluster=kubernetes \
  --user="kube-controller-manager" \
  --kubeconfig=kube-controller-manager.kubeconfig

# Set the default context
[root@k8s-master-01 /opt/cert/k8s]# kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# ll
-rw------- 1 root root 6289 2021-01-19 16:32 kube-controller-manager.kubeconfig   # one of the generated files
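
Optionally, the generated kubeconfig can be inspected; kubectl redacts the embedded certificate data but shows the server address, context, and user:

[root@k8s-master-01 /opt/cert/k8s]# kubectl config view --kubeconfig=kube-controller-manager.kubeconfig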

Create the kube-scheduler cluster kubeconfig

[root@k8s-master-01 /opt/cert/k8s]# export KUBE_APISERVER="https://172.16.1.80:8443"

# Set the cluster parameters
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-scheduler.kubeconfig

# Set the client authentication parameters
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-credentials "kube-scheduler" \
  --client-certificate=/etc/kubernetes/ssl/kube-scheduler.pem \
  --client-key=/etc/kubernetes/ssl/kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.kubeconfig

# Set the context parameters (the context ties the cluster and user parameters together)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-context default \
  --cluster=kubernetes \
  --user="kube-scheduler" \
  --kubeconfig=kube-scheduler.kubeconfig

# Set the default context
[root@k8s-master-01 /opt/cert/k8s]# kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# ll
-rw------- 1 root root 6239 2021-01-19 16:36 kube-scheduler.kubeconfig   # one of the generated files

Create the kube-proxy cluster kubeconfig

[root@k8s-master-01 /opt/cert/k8s]# export KUBE_APISERVER="https://172.16.1.80:8443"

# Set the cluster parameters
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig

# Set the client authentication parameters
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-credentials "kube-proxy" \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

# Set the context parameters (the context ties the cluster and user parameters together)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-context default \
  --cluster=kubernetes \
  --user="kube-proxy" \
  --kubeconfig=kube-proxy.kubeconfig

# Set the default context
[root@k8s-master-01 /opt/cert/k8s]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# ll
-rw------- 1 root root 6139 2021-01-19 16:38 kube-proxy.kubeconfig     # one of the generated files

Create the cluster administrator kubeconfig

[root@k8s-master-01 /opt/cert/k8s]# export KUBE_APISERVER="https://172.16.1.80:8443"

# Set the cluster parameters
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=admin.kubeconfig

# Set the client authentication parameters
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-credentials "admin" \
  --client-certificate=/etc/kubernetes/ssl/admin.pem \
  --client-key=/etc/kubernetes/ssl/admin-key.pem \
  --embed-certs=true \
  --kubeconfig=admin.kubeconfig

# Set the context parameters (the context ties the cluster and user parameters together)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-context default \
  --cluster=kubernetes \
  --user="admin" \
  --kubeconfig=admin.kubeconfig

# Set the default context
[root@k8s-master-01 /opt/cert/k8s]# kubectl config use-context default --kubeconfig=admin.kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# ll
-rw------- 1 root root 6105 2021-01-19 16:41 admin.kubeconfig  # one of the generated files

Configure TLS bootstrapping

[root@k8s-master-01 /opt/cert/k8s]# TLS_BOOTSTRAPPING_TOKEN=`head -c 16 /dev/urandom | od -An -t x | tr -d ' '`
[root@k8s-master-01 /opt/cert/k8s]# cat > token.csv << EOF
${TLS_BOOTSTRAPPING_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

[root@k8s-master-01 /opt/cert/k8s]# ll
-rw-r--r-- 1 root root   84 2021-01-19 16:51 token.csv   # one of the generated files
[root@k8s-master-01 /opt/cert/k8s]# cat token.csv
3358c2b56753366ebf7d02bb00eeb3fc,kubelet-bootstrap,10001,"system:kubelet-bootstrap"

Create the bootstrap kubeconfig

[root@k8s-master-01 /opt/cert/k8s]# export KUBE_APISERVER="https://172.16.1.80:8443"

# Set the cluster parameters
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kubelet-bootstrap.kubeconfig

# Set the client authentication parameters; the token here must be the one from token.csv above (3358c2b56753366ebf7d02bb00eeb3fc)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-credentials "kubelet-bootstrap" \
  --token=3358c2b56753366ebf7d02bb00eeb3fc \
  --kubeconfig=kubelet-bootstrap.kubeconfig

# Set the context parameters (the context ties the cluster and user parameters together)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-context default \
  --cluster=kubernetes \
  --user="kubelet-bootstrap" \
  --kubeconfig=kubelet-bootstrap.kubeconfig

# Set the default context
[root@k8s-master-01 /opt/cert/k8s]# kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig

[root@k8s-master-01 /opt/cert/k8s]# ll
-rw------- 1 root root 2061 2021-01-19 16:55 kubelet-bootstrap.kubeconfig   # one of the generated files

Distribute the cluster kubeconfig files

[root@k8s-master-01 /opt/cert/k8s]# for i in m1 m2 m3; do
   ssh root@$i "mkdir -p  /etc/kubernetes/cfg";
   scp token.csv kube-scheduler.kubeconfig kube-controller-manager.kubeconfig admin.kubeconfig kube-proxy.kubeconfig kubelet-bootstrap.kubeconfig root@$i:/etc/kubernetes/cfg;
 done
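
A quick, optional way to confirm the distribution is to list the target directory on each master:

[root@k8s-master-01 /opt/cert/k8s]# for i in m1 m2 m3; do ssh root@$i "ls /etc/kubernetes/cfg"; done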

Deploy kube-apiserver (run on all three master machines)

[root@k8s-master-01 /opt/cert/k8s]#  KUBE_APISERVER_IP=`hostname -i`

[root@k8s-master-01 /opt/cert/k8s]#  cat > /etc/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--advertise-address=${KUBE_APISERVER_IP} \\
--default-not-ready-toleration-seconds=360 \\
--default-unreachable-toleration-seconds=360 \\
--max-mutating-requests-inflight=2000 \\
--max-requests-inflight=4000 \\
--default-watch-cache-size=200 \\
--delete-collection-workers=2 \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.96.0.0/16 \\
--service-node-port-range=10-52767 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/etc/kubernetes/cfg/token.csv \\
--kubelet-client-certificate=/etc/kubernetes/ssl/server.pem \\
--kubelet-client-key=/etc/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/etc/kubernetes/ssl/server.pem  \\
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/kubernetes/k8s-audit.log \\
--etcd-servers=https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379 \\
--etcd-cafile=/etc/etcd/ssl/ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem"
EOF
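
Before starting the service, it is worth checking that --advertise-address was rendered with this node's internal IP (a simple grep is enough):

[root@k8s-master-01 /opt/cert/k8s]# grep advertise-address /etc/kubernetes/cfg/kube-apiserver.conf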

# If hostname -i returned the external IP, rewrite it to the internal one:
[root@k8s-master-01 /opt/cert/k8s]#  sed -i 's#192.168.13#172.16.1#g' /etc/kubernetes/cfg/kube-apiserver.conf

Register the kube-apiserver service

[root@k8s-master-01 /opt/cert/k8s]# cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=10
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

[root@k8s-master-01 /opt/cert/k8s]# for i in m2 m3;do scp /usr/lib/systemd/system/kube-apiserver.service $i:/usr/lib/systemd/system/kube-apiserver.service;done

[root@k8s-master-01 /opt/cert/k8s]# mkdir -p /var/log/kubernetes/
[root@k8s-master-01 /opt/cert/k8s]# systemctl daemon-reload
[root@k8s-master-01 /opt/cert/k8s]# systemctl enable --now kube-apiserver

# Verify that it started (check on all three master machines)
[root@k8s-master-01 /opt/cert/k8s]# systemctl status kube-apiserver
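
Beyond systemctl status, you can also confirm that the secure port is listening; hitting /healthz should return "ok" on a default 1.18 RBAC setup (the endpoint is exposed via the system:public-info-viewer role), though the response may differ if anonymous auth is disabled:

[root@k8s-master-01 /opt/cert/k8s]# ss -lntp | grep 6443
[root@k8s-master-01 /opt/cert/k8s]# curl -k https://127.0.0.1:6443/healthz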

kube-apiserver high availability

[root@k8s-master-01 /opt/cert/k8s]# yum install -y keepalived haproxy
[root@k8s-master-02 /opt/cert/k8s]# yum install -y keepalived haproxy
[root@k8s-master-03 /opt/cert/k8s]# yum install -y keepalived haproxy
[root@k8s-master-01 /opt/cert/k8s]# cat > /etc/haproxy/haproxy.cfg <<EOF
global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

listen stats
  bind    *:8006
  mode    http
  stats   enable
  stats   hide-version
  stats   uri       /stats
  stats   refresh   30s
  stats   realm     Haproxy\ Statistics
  stats   auth      admin:admin

frontend k8s-master
  bind 0.0.0.0:8443
  bind 127.0.0.1:8443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server k8s-master-01    172.16.1.71:6443  check inter 2000 fall 2 rise 2 weight 100
  server k8s-master-02    172.16.1.72:6443  check inter 2000 fall 2 rise 2 weight 100
  server k8s-master-03    172.16.1.73:6443  check inter 2000 fall 2 rise 2 weight 100
EOF

[root@k8s-master-01 /opt/cert/k8s]# for i in m2 m3;do scp /etc/haproxy/haproxy.cfg $i:/etc/haproxy/haproxy.cfg;done

[root@k8s-master-01 /opt/cert/k8s]#  systemctl enable --now haproxy.service
[root@k8s-master-02 /opt/cert/k8s]#  systemctl enable --now haproxy.service
[root@k8s-master-03 /opt/cert/k8s]#  systemctl enable --now haproxy.service
[root@k8s-master-01 /opt/cert/k8s]# systemctl status haproxy.service
[root@k8s-master-02 /opt/cert/k8s]# systemctl status haproxy.service
[root@k8s-master-03 /opt/cert/k8s]# systemctl status haproxy.service
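
Two optional checks: the monitor frontend defined above answers on port 33305, and the TCP frontend for the apiservers should be listening on 8443:

[root@k8s-master-01 /opt/cert/k8s]# curl -s http://127.0.0.1:33305/monitor
[root@k8s-master-01 /opt/cert/k8s]# ss -lntp | grep 8443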

[root@k8s-master-01 /opt/cert/k8s]# mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf_bak
[root@k8s-master-01 /opt/cert/k8s]# cd /etc/keepalived
[root@k8s-master-01 /opt/cert/k8s]# cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_kubernetes {
    script "/etc/keepalived/check_kubernetes.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface eth1
    mcast_src_ip 172.16.1.71
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.1.80
    }
#    track_script {
#       chk_kubernetes
#    }
}
EOF

[root@k8s-master-01 /etc/keepalived]# for i in m2 m3;do scp keepalived.conf $i:/etc/keepalived/keepalived.conf;done

## On k8s-master-02
[root@k8s-master-02 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_kubernetes {
    script "/etc/keepalived/check_kubernetes.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth1
    mcast_src_ip 172.16.1.72
    virtual_router_id 51
    priority 90
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.1.80
    }
#    track_script {
#       chk_kubernetes
#    }
}

## On k8s-master-03
[root@k8s-master-03 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_kubernetes {
    script "/etc/keepalived/check_kubernetes.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth1
    mcast_src_ip 172.16.1.73
    virtual_router_id 51
    priority 80
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.1.80
    }
#    track_script {
#       chk_kubernetes
#    }
}

Set up the health-check script

# Note: the heredoc delimiter is quoted ('EOF') so that $(...) and $variables are written into the script literally instead of being expanded by the current shell
[root@k8s-master-01 /etc/keepalived]# cat > /etc/keepalived/check_kubernetes.sh <<'EOF'
#!/bin/bash

function check_kubernetes() {
    for ((i=0;i<5;i++));do
        apiserver_pid_id=$(pgrep kube-apiserver)
        if [[ ! -z $apiserver_pid_id ]];then
            return
        else
            sleep 2
        fi
        apiserver_pid_id=0
    done
}

# 1:running  0:stopped
check_kubernetes
if [[ $apiserver_pid_id -eq 0 ]];then
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF

[root@k8s-master-01 /etc/keepalived]# for i in m2 m3;do scp /etc/keepalived/check_kubernetes.sh $i:/etc/keepalived/check_kubernetes.sh; done

# Make the check script executable (on all three masters)
[root@k8s-master-01 /etc/keepalived]# chmod +x /etc/keepalived/check_kubernetes.sh
[root@k8s-master-02 ~]# chmod +x /etc/keepalived/check_kubernetes.sh
[root@k8s-master-03 ~]# chmod +x /etc/keepalived/check_kubernetes.sh

# Start the keepalived and haproxy services (on all three masters)
[root@k8s-master-01 /etc/keepalived]# systemctl enable --now keepalived haproxy
[root@k8s-master-02 ~]# systemctl enable --now keepalived haproxy
[root@k8s-master-03 ~]# systemctl enable --now keepalived haproxy

# Verify that they started (on all three masters)
[root@k8s-master-01 /etc/keepalived]# systemctl status keepalived.service
[root@k8s-master-02 ~]# systemctl status keepalived.service
[root@k8s-master-03 ~]# systemctl status keepalived.service

# Check which node holds the VIP (on all three masters)
[root@k8s-master-01 /etc/keepalived]# ip a | grep 172.16.1.80    # present
    inet 172.16.1.80/32 scope global eth1
[root@k8s-master-02 ~]# ip a | grep 172.16.1.80   # absent
[root@k8s-master-03 ~]# ip a | grep 172.16.1.80   # absent
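
Optionally, failover can be exercised by stopping keepalived on master-01: the VIP should move to master-02 (priority 90), and move back once keepalived on master-01 is started again:

[root@k8s-master-01 /etc/keepalived]# systemctl stop keepalived
[root@k8s-master-02 ~]# ip a | grep 172.16.1.80    # the VIP should now be here
[root@k8s-master-01 /etc/keepalived]# systemctl start keepalived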

Authorize the TLS Bootstrapping user request

[root@k8s-master-01 /etc/keepalived]# kubectl create clusterrolebinding kubelet-bootstrap \
 --clusterrole=system:node-bootstrapper \
 --user=kubelet-bootstrap
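
The binding can be verified afterwards; the output should show the system:node-bootstrapper role bound to the kubelet-bootstrap user:

[root@k8s-master-01 /etc/keepalived]# kubectl describe clusterrolebinding kubelet-bootstrap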

Create the kube-controller-manager configuration file

[root@k8s-master-01 /etc/keepalived]# cat > /etc/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--leader-elect=true \\
--cluster-name=kubernetes \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/12 \\
--service-cluster-ip-range=10.96.0.0/16 \\
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \\
--root-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--kubeconfig=/etc/kubernetes/cfg/kube-controller-manager.kubeconfig \\
--tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s \\
--controllers=*,bootstrapsigner,tokencleaner \\
--use-service-account-credentials=true \\
--node-monitor-grace-period=10s \\
--horizontal-pod-autoscaler-use-rest-clients=true"
EOF

[root@k8s-master-01 /etc/keepalived]# cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

[root@k8s-master-01 /etc/keepalived]# for i in m1 m2 m3; do   scp /etc/kubernetes/cfg/kube-controller-manager.conf root@$i:/etc/kubernetes/cfg;   scp /usr/lib/systemd/system/kube-controller-manager.service root@$i:/usr/lib/systemd/system/kube-controller-manager.service; done

# Start kube-controller-manager.service (on all three masters)
[root@k8s-master-01 /etc/keepalived]# systemctl enable --now kube-controller-manager.service
[root@k8s-master-02 ~]# systemctl enable --now kube-controller-manager.service
[root@k8s-master-03 ~]# systemctl enable --now kube-controller-manager.service

# Check kube-controller-manager.service (on all three masters)
[root@k8s-master-01 /etc/keepalived]# systemctl status kube-controller-manager.service
[root@k8s-master-02 ~]# systemctl status kube-controller-manager.service
[root@k8s-master-03 ~]# systemctl status kube-controller-manager.service

Create the kube-scheduler configuration

[root@k8s-master-01 /etc/keepalived]# cat > /etc/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--kubeconfig=/etc/kubernetes/cfg/kube-scheduler.kubeconfig \\
--leader-elect=true \\
--master=http://127.0.0.1:8080 \\
--bind-address=127.0.0.1 "
EOF

[root@k8s-master-01 /etc/keepalived]# cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

[root@k8s-master-01 /etc/keepalived]# for ip in m2 m3; do scp /usr/lib/systemd/system/kube-scheduler.service root@${ip}:/usr/lib/systemd/system;     scp /etc/kubernetes/cfg/kube-scheduler.conf root@${ip}:/etc/kubernetes/cfg; done

[root@k8s-master-01 /etc/keepalived]# systemctl daemon-reload
[root@k8s-master-02 ~]# systemctl daemon-reload
[root@k8s-master-03 ~]# systemctl daemon-reload

# Start kube-scheduler.service (on all three masters)
[root@k8s-master-01 /etc/keepalived]# systemctl enable --now kube-scheduler.service
[root@k8s-master-02 ~]# systemctl enable --now kube-scheduler.service
[root@k8s-master-03 ~]# systemctl enable --now kube-scheduler.service

# Check kube-scheduler.service (on all three masters)
[root@k8s-master-01 /etc/keepalived]# systemctl status kube-scheduler.service
[root@k8s-master-02 ~]# systemctl status kube-scheduler.service
[root@k8s-master-03 ~]# systemctl status kube-scheduler.service

Check the cluster status

[root@k8s-master-01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}
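
Since all control-plane traffic will go through the VIP, it is also worth probing https://172.16.1.80:8443 once. With default RBAC the /version endpoint is readable without credentials; if your setup rejects anonymous requests you will get a 401/403 JSON body instead, which still proves the haproxy/keepalived path reaches the apiservers:

[root@k8s-master-01 ~]# curl -k https://172.16.1.80:8443/version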

Deploy the kubelet service

[root@k8s-master-01 ~]#  KUBE_HOSTNAME=`hostname`

[root@k8s-master-01 ~]#  cat > /etc/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--hostname-override=${KUBE_HOSTNAME} \\
--container-runtime=docker \\
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/etc/kubernetes/cfg/kubelet-bootstrap.kubeconfig \\
--config=/etc/kubernetes/cfg/kubelet-config.yml \\
--cert-dir=/etc/kubernetes/ssl \\
--image-pull-progress-deadline=15m \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/k8sos/pause:3.2"
EOF

[root@k8s-master-01 ~]# cat > /etc/kubernetes/cfg/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 172.16.1.71
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.96.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF

[root@k8s-master-01 ~]# cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kubelet.conf
ExecStart=/usr/local/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Distribute the kubelet files to the other masters
[root@k8s-master-01 ~]# for ip in m2 m3;do  scp /etc/kubernetes/cfg/{kubelet-config.yml,kubelet.conf} root@${ip}:/etc/kubernetes/cfg;     scp /usr/lib/systemd/system/kubelet.service root@${ip}:/usr/lib/systemd/system; done

# On master-02
[root@k8s-master-02 ~]# sed -i 's#master-01#master-02#g' /etc/kubernetes/cfg/kubelet.conf
[root@k8s-master-02 ~]# sed -i 's#172.16.1.71#172.16.1.72#g' /etc/kubernetes/cfg/kubelet-config.yml

# On master-03
[root@k8s-master-03 ~]# sed -i 's#master-01#master-03#g' /etc/kubernetes/cfg/kubelet.conf
[root@k8s-master-03 ~]# sed -i 's#172.16.1.71#172.16.1.73#g' /etc/kubernetes/cfg/kubelet-config.yml

# Start the kubelet service
[root@k8s-master-01 ~]# systemctl daemon-reload;systemctl enable --now kubelet;systemctl status kubelet.service
[root@k8s-master-02 ~]# systemctl daemon-reload;systemctl enable --now kubelet;systemctl status kubelet.service
[root@k8s-master-03 ~]# systemctl daemon-reload;systemctl enable --now kubelet;systemctl status kubelet.service

Deploy the kube-proxy service

[root@k8s-master-01 ~]#  cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-proxy.conf
ExecStart=/usr/local/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

[root@k8s-master-01 ~]#  cat > /etc/kubernetes/cfg/kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 172.16.1.71
healthzBindAddress: 172.16.1.71:10256
metricsBindAddress: 172.16.1.71:10249
clientConnection:
  burst: 200
  kubeconfig: /etc/kubernetes/cfg/kube-proxy.kubeconfig
  qps: 100
hostnameOverride: k8s-master-01
clusterCIDR: 10.96.0.0/16
enableProfiling: true
mode: "ipvs"
kubeProxyIPTablesConfiguration:
  masqueradeAll: false
kubeProxyIPVSConfiguration:
  scheduler: rr
  excludeCIDRs: []
EOF

[root@k8s-master-01 ~]#  cat > /etc/kubernetes/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--config=/etc/kubernetes/cfg/kube-proxy-config.yml"
EOF

[root@k8s-master-01 ~]# for ip in  m2 m3;do
scp /etc/kubernetes/cfg/{kube-proxy-config.yml,kube-proxy.conf} root@${ip}:/etc/kubernetes/cfg/
scp /usr/lib/systemd/system/kube-proxy.service root@${ip}:/usr/lib/systemd/system/
done

# On master-02
[root@k8s-master-02 ~]# sed -i 's#172.16.1.71#172.16.1.72#g' /etc/kubernetes/cfg/kube-proxy-config.yml
[root@k8s-master-02 ~]# sed -i 's#master-01#master-02#g' /etc/kubernetes/cfg/kube-proxy-config.yml

# On master-03
[root@k8s-master-03 ~]# sed -i 's#172.16.1.71#172.16.1.73#g' /etc/kubernetes/cfg/kube-proxy-config.yml
[root@k8s-master-03 ~]# sed -i 's#master-01#master-03#g' /etc/kubernetes/cfg/kube-proxy-config.yml

# Run on all three masters
[root@k8s-master-01 ~]# systemctl daemon-reload; systemctl enable --now kube-proxy; systemctl status kube-proxy
[root@k8s-master-02 ~]# systemctl daemon-reload; systemctl enable --now kube-proxy; systemctl status kube-proxy
[root@k8s-master-03 ~]# systemctl daemon-reload; systemctl enable --now kube-proxy; systemctl status kube-proxy
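
Because kube-proxy runs in ipvs mode here, the virtual-server table is a useful extra check once services exist; this assumes the ipvsadm tool is installed (yum install -y ipvsadm):

[root@k8s-master-01 ~]# ipvsadm -Ln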

# Check the pending CSRs
[root@k8s-master-01 /opt/cert/k8s]# kubectl get csr
NAME                                                   AGE   SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-GUPj9U-cv5F5WtBMlWJByKsCa5OkV--6nPPSneRkxU0   15s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
node-csr-U3uVDsdmGlnv7vQD-Rieui3YQpW5pK6sZ56BtsOpLgQ   18s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
node-csr-zECR2kT2FCF6ZkIN9mwyNfyn_zPyzGE54SQF47enU08   38s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending

Approve the join requests

[root@k8s-master-01 /opt/cert/k8s]# kubectl certificate approve `kubectl get csr | grep "Pending" | awk '{print $1}'`
certificatesigningrequest.certificates.k8s.io/node-csr-GUPj9U-cv5F5WtBMlWJByKsCa5OkV--6nPPSneRkxU0 approved
certificatesigningrequest.certificates.k8s.io/node-csr-U3uVDsdmGlnv7vQD-Rieui3YQpW5pK6sZ56BtsOpLgQ approved
certificatesigningrequest.certificates.k8s.io/node-csr-zECR2kT2FCF6ZkIN9mwyNfyn_zPyzGE54SQF47enU08 approved
[root@k8s-master-01 /opt/cert/k8s]# kubectl get nodes
NAME            STATUS     ROLES    AGE   VERSION
k8s-master-01   Ready      <none>   43s   v1.18.8
k8s-master-02   Ready      <none>   14s   v1.18.8
k8s-master-03   NotReady   <none>   1s    v1.18.8

Deploy the network plugin

# Option 1 (if this does not work, use option 2)
[root@k8s-master-01 /opt/cert/k8s]# cd /opt/data/
[root@k8s-master-01 /opt/data/]# wget https://github.com/coreos/flannel/releases/download/v0.13.1-rc1/flannel-v0.13.1-rc1-linux-amd64.tar.gz

# Option 2:
[root@k8s-master-01 /opt/data]# docker run -dit registry.cn-hangzhou.aliyuncs.com/k8sos/k8s:v1.18.8.1 bash
[root@k8s-master-01 /opt/data]# docker ps  # note the container ID
[root@k8s-master-01 /opt/data]# docker exec 6e678a83701c ls  # note the package name
[root@k8s-master-01 /opt/data]# docker cp  6e678a83701c:flannel-v0.11.0-linux-amd64.tar.gz .
[root@k8s-master-01 /opt/data]# tar xf flannel-v0.11.0-linux-amd64.tar.gz
[root@k8s-master-01 /opt/data]# ll
-rwxr-xr-x 1 root      root       35249016 Jan 29  2019 flanneld
-rw-r--r-- 1 root      root        9565743 Jan 29  2019 flannel-v0.11.0-linux-amd64.tar.gz
-rwxr-xr-x 1 root      root           2139 Oct 23  2018 mk-docker-opts.sh
[root@k8s-master-01 /opt/data]# for i in m1 m2 m3;do scp flanneld mk-docker-opts.sh  root@$i:/usr/local/bin; done

[root@k8s-master-01 /opt/data]# etcdctl \
 --ca-file=/etc/etcd/ssl/ca.pem \
 --cert-file=/etc/etcd/ssl/etcd.pem \
 --key-file=/etc/etcd/ssl/etcd-key.pem \
 --endpoints="https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379" \
 mk /coreos.com/network/config '{"Network":"10.244.0.0/12", "SubnetLen": 21, "Backend": {"Type": "vxlan", "DirectRouting": true}}'

# Read the config back with get
[root@k8s-master-01 /opt/data]# etcdctl \
 --ca-file=/etc/etcd/ssl/ca.pem \
 --cert-file=/etc/etcd/ssl/etcd.pem \
 --key-file=/etc/etcd/ssl/etcd-key.pem \
 --endpoints="https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379" \
 get /coreos.com/network/config

Register the network plugin service

[root@k8s-master-01 /opt/data]# cat > /usr/lib/systemd/system/flanneld.service << EOF
[Unit]
Description=Flanneld address
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
ExecStart=/usr/local/bin/flanneld \\
  -etcd-cafile=/etc/etcd/ssl/ca.pem \\
  -etcd-certfile=/etc/etcd/ssl/etcd.pem \\
  -etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
  -etcd-endpoints=https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379 \\
  -etcd-prefix=/coreos.com/network \\
  -ip-masq
ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=always
RestartSec=5
StartLimitInterval=0
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF

[root@k8s-master-01 /opt/data]# for i in m2 m3;do scp /usr/lib/systemd/system/flanneld.service root@$i:/usr/lib/systemd/system;done

[root@k8s-master-01 /opt/data]# sed -i '/ExecStart/s/\(.*\)/#\1/' /usr/lib/systemd/system/docker.service
[root@k8s-master-01 /opt/data]# sed -i '/ExecReload/a ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock' /usr/lib/systemd/system/docker.service
[root@k8s-master-01 /opt/data]# sed -i '/ExecReload/a EnvironmentFile=-/run/flannel/subnet.env' /usr/lib/systemd/system/docker.service
[root@k8s-master-01 /opt/data]# for ip in m2 m3;do scp /usr/lib/systemd/system/docker.service root@${ip}:/usr/lib/systemd/system; done

[root@k8s-master-01 /opt/data]# systemctl daemon-reload
[root@k8s-master-01 /opt/data]# systemctl start flanneld
[root@k8s-master-01 /opt/data]# systemctl enable --now flanneld.service
[root@k8s-master-01 /opt/data]# systemctl restart docker

[root@k8s-master-02 ~]# systemctl daemon-reload
[root@k8s-master-02 ~]# systemctl start flanneld
[root@k8s-master-02 ~]# systemctl enable --now flanneld.service
[root@k8s-master-02 ~]# systemctl restart docker

[root@k8s-master-03 ~]# systemctl daemon-reload
[root@k8s-master-03 ~]# systemctl start flanneld
[root@k8s-master-03 ~]# systemctl enable --now flanneld.service
[root@k8s-master-03 ~]# systemctl restart docker

[root@k8s-master-02 ~]# ip a  # check the flannel.1 and docker0 addresses
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    inet 10.241.200.1/21 brd 10.241.207.255 scope global docker0
7: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
    inet 10.241.200.0/32 scope global flannel.1
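
Each node that started flanneld should also have registered a subnet lease in etcd; listing the subnets key (same etcdctl v2 flags as above) is an easy cross-check:

[root@k8s-master-01 /opt/data]# etcdctl \
 --ca-file=/etc/etcd/ssl/ca.pem \
 --cert-file=/etc/etcd/ssl/etcd.pem \
 --key-file=/etc/etcd/ssl/etcd-key.pem \
 --endpoints="https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379" \
 ls /coreos.com/network/subnets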

Install the cluster DNS

[root@k8s-master-01 /opt/data]# yum install git -y
[root@k8s-master-01 /opt/data]# git clone https://github.com/coredns/deployment.git
[root@k8s-master-01 /opt/data]# cd /opt/data/deployment/kubernetes
[root@k8s-master-01 /opt/data/deployment/kubernetes]# sed -i 's#coredns/coredns#registry.cn-hangzhou.aliyuncs.com/k8sos/coredns#g' coredns.yaml.sed
[root@k8s-master-01 /opt/data/deployment/kubernetes]# ./deploy.sh -i 10.96.0.2 -s | kubectl apply -f -

# Test:
[root@k8s-master-01 /opt/data/deployment/kubernetes]# kubectl get pods -n kube-system
NAME                       READY   STATUS    RESTARTS   AGE
coredns-6cb6557c66-7cx62   1/1     Running   0          60s
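
The deploy script should also have created the kube-dns service on the cluster IP passed with -i (10.96.0.2), which can be confirmed with:

[root@k8s-master-01 /opt/data/deployment/kubernetes]# kubectl get svc -n kube-system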

Bind cluster-admin privileges

[root@k8s-master-01 /opt/data/deployment/kubernetes]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubernetes
clusterrolebinding.rbac.authorization.k8s.io/cluster-system-anonymous created

Test the cluster DNS

[root@k8s-master-01 /opt/data/deployment/kubernetes]# kubectl run test -it --rm --image=busybox:1.28.3
If you don't see a command prompt, try pressing enter.
/ #  nslookup kubernetes
Server:    10.96.0.2
Address 1: 10.96.0.2 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local

Join the worker nodes

# Set docker on the node machines to the cgroupfs cgroup driver
[root@k8s-node-01 /etc/kubernetes/cfg]# cat /etc/docker/daemon.json
{
  "registry-mirrors": ["https://8mh75mhz.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=cgroupfs"]
}

[root@k8s-node-02 /etc/kubernetes/cfg]# cat /etc/docker/daemon.json
{
  "registry-mirrors": ["https://8mh75mhz.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=cgroupfs"]
}

# Reload and restart docker on both nodes
[root@k8s-node-01 /etc/kubernetes/cfg]# systemctl daemon-reload
[root@k8s-node-02 /etc/kubernetes/cfg]# systemctl daemon-reload
[root@k8s-node-01 /etc/kubernetes/cfg]# systemctl restart docker
[root@k8s-node-02 /etc/kubernetes/cfg]# systemctl restart docker

# Distribute the binaries (run on master01)
[root@k8s-master-01 /opt/data/deployment/kubernetes]# cd /opt/data/kubernetes/server/bin
[root@k8s-master-01 /opt/data/kubernetes/server/bin]# for i in n1 n2;do 
 scp kubelet kube-proxy $i:/usr/local/bin
 done

# Distribute the certificates (run on master01)
[root@k8s-master-01 /opt/data/kubernetes/server/bin]# cd /opt/cert/k8s
[root@k8s-master-01 /opt/cert/k8s]# for i in n1 n2; do   ssh root@$i "mkdir -pv /etc/kubernetes/ssl";   scp -pr ./{ca*.pem,admin*pem,kube-proxy*pem} root@$i:/etc/kubernetes/ssl;  done

# Distribute the network plugin binaries (run on master01)
[root@k8s-master-01 /opt/cert/k8s]# cd /opt/data
[root@k8s-master-01 /opt/data]# for i in n1 n2;do scp flanneld mk-docker-opts.sh $i:/usr/local/bin; done

# Distribute the kubelet config and unit file (run on master01)
[root@k8s-master-01 /opt/data]# cd /etc/kubernetes/cfg
[root@k8s-master-01 /etc/kubernetes/cfg]# for ip in n1 n2; do ssh root@${ip} "mkdir -pv /var/log/kubernetes"; ssh root@${ip} "mkdir -pv /etc/kubernetes/cfg/";     scp /etc/kubernetes/cfg/{kubelet-config.yml,kubelet.conf,kubelet-bootstrap.kubeconfig} root@${ip}:/etc/kubernetes/cfg;     scp /usr/lib/systemd/system/kubelet.service root@${ip}:/usr/lib/systemd/system; done

# Adjust the configuration files on the node machines
# Modify the k8s-node-01 configuration
[root@k8s-node-01 /etc/etcd/ssl]# sed -i 's#master-01#node-01#g' /etc/kubernetes/cfg/kubelet.conf
[root@k8s-node-01 /etc/etcd/ssl]# sed -i 's#172.16.1.71#172.16.1.74#g' /etc/kubernetes/cfg/kubelet-config.yml

# On node-02, modify the k8s-node-02 configuration
[root@k8s-node-02 /etc/etcd/ssl]# sed -i 's#master-01#node-02#g' /etc/kubernetes/cfg/kubelet.conf
[root@k8s-node-02 /etc/etcd/ssl]# sed -i 's#172.16.1.71#172.16.1.75#g' /etc/kubernetes/cfg/kubelet-config.yml

# Start kubelet.service on both node machines
[root@k8s-node-01 /etc/etcd/ssl]# systemctl daemon-reload ;systemctl enable --now kubelet.service
[root@k8s-node-02 /etc/etcd/ssl]# systemctl daemon-reload ;systemctl enable --now kubelet.service

# Check kubelet.service on both node machines
[root@k8s-node-01 /etc/etcd/ssl]# systemctl status kubelet.service
[root@k8s-node-02 /etc/etcd/ssl]# systemctl status kubelet.service

# Distribute the kube-proxy config (run on master01)
[root@k8s-master-01 /etc/etcd/ssl]# cd /etc/kubernetes/cfg
[root@k8s-master-01 /etc/kubernetes/cfg]#  for ip in n1 n2;do     scp /etc/kubernetes/cfg/{kube-proxy-config.yml,kube-proxy.conf,kube-proxy.kubeconfig} root@${ip}:/etc/kubernetes/cfg/;     scp /usr/lib/systemd/system/kube-proxy.service root@${ip}:/usr/lib/systemd/system/; done

# Distribute the certificates (run on master01; identical to the step above)
[root@k8s-master-01 /etc/kubernetes/cfg]# cd /opt/cert/k8s
[root@k8s-master-01 /opt/cert/k8s]# for i in n1 n2; do   ssh root@$i "mkdir -pv /etc/kubernetes/ssl";   scp -pr ./{ca*.pem,admin*pem,kube-proxy*pem} root@$i:/etc/kubernetes/ssl;  done

# Adjust the configuration files on the node machines
# Modify the kubernetes-node-01 node
[root@k8s-node-01 /etc/etcd/ssl]# sed -i 's#172.16.1.71#172.16.1.74#g' /etc/kubernetes/cfg/kube-proxy-config.yml
[root@k8s-node-01 /etc/etcd/ssl]# sed -i 's#master-01#node-01#g' /etc/kubernetes/cfg/kube-proxy-config.yml

# Modify the kubernetes-node-02 node
[root@k8s-node-02 /etc/etcd/ssl]# sed -i 's#172.16.1.71#172.16.1.75#g' /etc/kubernetes/cfg/kube-proxy-config.yml
[root@k8s-node-02 /etc/etcd/ssl]# sed -i 's#master-01#node-02#g' /etc/kubernetes/cfg/kube-proxy-config.yml

# Start kube-proxy.service on the node machines
[root@k8s-node-01 /etc/etcd/ssl]# systemctl daemon-reload ;systemctl enable --now kube-proxy.service
[root@k8s-node-02 /etc/etcd/ssl]# systemctl daemon-reload ;systemctl enable --now kube-proxy.service

# Check the status of kube-proxy.service
[root@k8s-node-01 /etc/etcd/ssl]# systemctl status kube-proxy.service
[root@k8s-node-02 /etc/etcd/ssl]# systemctl status kube-proxy.service

# Configure the worker nodes to reach etcd (run on master01)
[root@k8s-master-01 /opt/cert/k8s]#  cd /etc/kubernetes/cfg
[root@k8s-master-01 /etc/kubernetes/cfg]#  for i in n1 n2; do ssh root@$i "mkdir -pv /etc/etcd/ssl"; scp -p /etc/etcd/ssl/*.pem root@$i:/etc/etcd/ssl;  done
[root@k8s-master-01 /etc/kubernetes/cfg]# for i in n1 n2; do scp /opt/data/etcd-v3.3.24-linux-amd64/etcdctl $i:/usr/local/bin;  done

# Test on node-01
[root@k8s-node-01 /etc/kubernetes/cfg]# ETCDCTL_API=3 etcdctl  --cacert=/etc/etcd/ssl/etcd.pem  --cert=/etc/etcd/ssl/etcd.pem  --key=/etc/etcd/ssl/etcd-key.pem  --endpoints="https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379"  endpoint status --write-out='table'
+--------------------------+------------------+---------+---------+-----------+-----------+------------+
|         ENDPOINT         |        ID        | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
+--------------------------+------------------+---------+---------+-----------+-----------+------------+
| https://172.16.1.71:2379 | 80d0ace027643b4e |  3.3.24 |  3.3 MB |      true |         7 |     122757 |
| https://172.16.1.72:2379 | 9a7cf2dc57ec669f |  3.3.24 |  3.3 MB |     false |         7 |     122757 |
| https://172.16.1.73:2379 | 54f8db1a175b9c73 |  3.3.24 |  3.3 MB |     false |         7 |     122757 |
+--------------------------+------------------+---------+---------+-----------+-----------+------------+

# Test on node-02
[root@k8s-node-02 /etc/kubernetes/cfg]# ETCDCTL_API=3 etcdctl  --cacert=/etc/etcd/ssl/etcd.pem  --cert=/etc/etcd/ssl/etcd.pem  --key=/etc/etcd/ssl/etcd-key.pem  --endpoints="https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379"  endpoint status --write-out='table'
+--------------------------+------------------+---------+---------+-----------+-----------+------------+
|         ENDPOINT         |        ID        | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
+--------------------------+------------------+---------+---------+-----------+-----------+------------+
| https://172.16.1.71:2379 | 80d0ace027643b4e |  3.3.24 |  3.3 MB |      true |         7 |     123442 |
| https://172.16.1.72:2379 | 9a7cf2dc57ec669f |  3.3.24 |  3.3 MB |     false |         7 |     123442 |
| https://172.16.1.73:2379 | 54f8db1a175b9c73 |  3.3.24 |  3.3 MB |     false |         7 |     123442 |
+--------------------------+------------------+---------+---------+-----------+-----------+------------+

# Sync docker.service (run on master01)
[root@k8s-master-01 /etc/kubernetes/cfg]# for ip in n1 n2; do 
scp /usr/lib/systemd/system/docker.service root@${ip}:/usr/lib/systemd/system; 
scp /usr/lib/systemd/system/flanneld.service root@${ip}:/usr/lib/systemd/system;
done

# Start the network plugin and docker
# Run on node-01
[root@k8s-node-01 /etc/kubernetes/cfg]# systemctl daemon-reload
[root@k8s-node-01 /etc/kubernetes/cfg]# systemctl enable --now flanneld
[root@k8s-node-01 /etc/kubernetes/cfg]# systemctl restart docker

# Run on node-02
[root@k8s-node-02 /etc/kubernetes/cfg]# systemctl daemon-reload
[root@k8s-node-02 /etc/kubernetes/cfg]# systemctl enable --now flanneld
[root@k8s-node-02 /etc/kubernetes/cfg]# systemctl restart docker

# Join the cluster (run on master01)
[root@k8s-master-01 /etc/kubernetes/cfg]# kubectl get csr
[root@k8s-master-01 /etc/kubernetes/cfg]# kubectl certificate approve `kubectl get csr | grep "Pending" | awk '{print $1}'`
[root@k8s-master-01 /etc/kubernetes/cfg]# kubectl get nodes
NAME            STATUS   ROLES    AGE     VERSION
k8s-master-01   Ready    <none>   23h     v1.18.8
k8s-master-02   Ready    <none>   23h     v1.18.8
k8s-master-03   Ready    <none>   23h     v1.18.8
k8s-node-01     Ready    <none>   7m35s   v1.18.8
k8s-node-02     Ready    <none>   5m48s   v1.18.8

# Install packages (run on every machine)
[root@k8s-master-01 /etc/kubernetes/cfg]# yum install -y bash-completion
[root@k8s-master-02 /etc/kubernetes/cfg]# yum install -y bash-completion
[root@k8s-master-03 /etc/kubernetes/cfg]# yum install -y bash-completion
[root@k8s-node-01 /etc/kubernetes/cfg]# yum install -y bash-completion
[root@k8s-node-02 /etc/kubernetes/cfg]# yum install -y bash-completion

# Run on master01
[root@k8s-master-01 /etc/kubernetes/cfg]# source /usr/share/bash-completion/bash_completion
[root@k8s-master-01 /etc/kubernetes/cfg]# source <(kubectl completion bash)
[root@k8s-master-01 /etc/kubernetes/cfg]# echo "source <(kubectl completion bash)" >> ~/.bashrc

# Label the master and node roles (run on master01)
[root@k8s-master-01 /etc/kubernetes/cfg]# kubectl label nodes k8s-master-01 node-role.kubernetes.io/master=k8s-master-01
node/k8s-master-01 labeled
[root@k8s-master-01 /etc/kubernetes/cfg]# kubectl label nodes k8s-master-02 node-role.kubernetes.io/master=k8s-master-02
node/k8s-master-02 labeled
[root@k8s-master-01 /etc/kubernetes/cfg]# kubectl label nodes k8s-master-03 node-role.kubernetes.io/master=k8s-master-03
node/k8s-master-03 labeled
[root@k8s-master-01 /etc/kubernetes/cfg]# kubectl label nodes k8s-node-01 node-role.kubernetes.io/node=k8s-node-01
node/k8s-node-01 labeled
[root@k8s-master-01 /etc/kubernetes/cfg]# kubectl label nodes k8s-node-02 node-role.kubernetes.io/node=k8s-node-02
node/k8s-node-02 labeled
[root@k8s-master-01 /etc/kubernetes/cfg]# kubectl get nodes
NAME            STATUS   ROLES    AGE     VERSION
k8s-master-01   Ready    master   23h     v1.18.8
k8s-master-02   Ready    master   23h     v1.18.8
k8s-master-03   Ready    master   23h     v1.18.8
k8s-node-01     Ready    node     10m     v1.18.8
k8s-node-02     Ready    node     8m20s   v1.18.8

# Taint the master nodes so that ordinary pods are not scheduled onto them (run on master01)
[root@k8s-master-01 /etc/kubernetes/cfg]# kubectl taint nodes k8s-master-01 node-role.kubernetes.io/master=k8s-master-01:NoSchedule --overwrite
node/k8s-master-01 modified
[root@k8s-master-01 /etc/kubernetes/cfg]# kubectl taint nodes k8s-master-02 node-role.kubernetes.io/master=k8s-master-02:NoSchedule --overwrite
node/k8s-master-02 modified
[root@k8s-master-01 /etc/kubernetes/cfg]# kubectl taint nodes k8s-master-03 node-role.kubernetes.io/master=k8s-master-03:NoSchedule --overwrite
node/k8s-master-03 modified
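
The taints can be confirmed per node; each master should report the NoSchedule taint set above:

[root@k8s-master-01 /etc/kubernetes/cfg]# kubectl describe node k8s-master-01 | grep -i taint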

# Test cluster networking and DNS end to end (run on master01)
[root@k8s-master-01 ~]# kubectl create deployment nginx --image=nginx
[root@k8s-master-01 ~]# kubectl expose deployment nginx --port=80 --type=NodePort
[root@k8s-master-01 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)       AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP       22h
nginx        NodePort    10.96.229.172   <none>        80:34745/TCP   4s
[root@k8s-master-01 ~]# curl 192.168.13.71:34745
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
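
As a final check, the nginx service name should also resolve through the cluster DNS from inside a pod (same busybox image as the earlier DNS test):

[root@k8s-master-01 ~]# kubectl run test -it --rm --image=busybox:1.28.3
/ # nslookup nginx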

1、Optimize the command line

yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc