MySQL主从容器化

本次项目为自己玩玩

1、构建镜像

# 配置master /etc/mysql/my.cnf
[root@k8s-m-01 ~]# mkdir -pv /root/mysql/master
[root@k8s-m-01 ~]# cd /root/mysql/master
[root@k8s-m-01 ~]# vim Dockerfile
FROM mysql:5.7
ADD my.cnf /etc/mysql/my.cnf
[root@k8s-m-01 ~]# vim my.cnf
[mysql]
socket=/var/lib/mysql/mysql.sock

[mysqld]
user=mysql
port=3306
binlog_format=mixed
log_bin=mysql-bin
socket=/var/lib/mysql/mysql.sock
server_id=1
sync_binlog=1
log-slave-updates=on

# 配置savle /etc/mysql/my.cnf
[root@k8s-m-01 ~]# mkdir -pv /root/mysql/savle
[root@k8s-m-01 ~]# cd /root/mysql/savle
[root@k8s-m-01 ~]# vim Dockerfile
FROM mysql:5.7
ADD my.cnf /etc/mysql/my.cnf
[root@k8s-m-01 ~]# vim my.cnf
[mysql]
socket=/var/lib/mysql/mysql.sock

[mysqld]
user=mysql
port=3306
binlog_format=mixed
log_bin=mysql-bin
socket=/var/lib/mysql/mysql.sock
server_id=2
read-only=1

# 创建镜像仓库
在阿里云镜像仓库中创建mysql-master和mysql-savle两个仓库

# 构建镜像

# 构建master镜像
[root@k8s-m-01 ~/mysql/master]# docker build -t registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-master:v1 .
Sending build context to Docker daemon  3.072kB
Step 1/2 : FROM mysql:5.7
 ---> a70d36bc331a
Step 2/2 : ADD my.cnf /etc/mysql/my.cnf
 ---> 4d70fb217ff7
Successfully built 4d70fb217ff7
Successfully tagged registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-master:v1

# 构建salve镜像
[root@k8s-m-01 ~/mysql/salve]# docker build -t registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-savle:v1 .
Sending build context to Docker daemon  3.072kB
Step 1/2 : FROM mysql:5.7
 ---> a70d36bc331a
Step 2/2 : ADD my.cnf /etc/mysql/my.cnf
 ---> df6a73e92015
Successfully built df6a73e92015
Successfully tagged registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-savle:v1

# 推送镜像到仓库
[root@k8s-m-01 ~/mysql/salve]#  docker login --username=yangyang091022 registry.cn-hangzhou.aliyuncs.com
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
[root@k8s-m-01 ~/mysql/salve]# docker push registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-master:v1

[root@k8s-m-01 ~/mysql/salve]# docker push registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-savle:v1

2、配置master节点

# 部署master节点

# 创建master节点主从复制用户

有一个问题 数据库的id怎么办,在构建镜像的时候 加入启动脚本
FROM registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-master:v1

# 将启动脚本拷贝进镜像(脚本需具备可执行权限)
ADD start /root/

WORKDIR /root

# 使用 exec 形式的 CMD;原来的 shell 形式 CMD "./start" 会经 /bin/sh -c 包装,
# 导致信号(如 SIGTERM)无法直接传递给 start 进程
CMD ["./start"]
namespace
kind: Namespace
apiVersion: v1
metadata:
  name: mysql-cluster
# 上面的 Namespace 与本 Service 在同一 YAML 流中,必须用 --- 分隔,
# 否则 kubectl apply 会按单文档解析报错
---
kind: Service
apiVersion: v1
metadata:
  name: mysql-cluster-master-svc
  namespace: mysql-cluster
spec:
  ports:
    - port: 3306
      targetPort: 3306
      name: mysql-cluster-master-tcp
  selector:
    app: mysql-cluster
    deploy: mysql-cluster-master
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: mysql-cluster-master
  namespace: mysql-cluster
spec:
  selector:
    matchLabels:
      app: mysql-cluster
      deploy: mysql-cluster-master
  serviceName: mysql-cluster-master-svc
  template:
    metadata:
      labels:
        app: mysql-cluster
        deploy: mysql-cluster-master
    spec:
      imagePullSecrets:
        - name: "myregistrykey"
      containers:
        - name: mysql
          image: registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-master:v1
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
          livenessProbe:
            tcpSocket:
              port: 3306
            initialDelaySeconds: 30
            periodSeconds: 2

          readinessProbe:
            tcpSocket:
              port: 3306
            initialDelaySeconds: 30
            periodSeconds: 2

          volumeMounts:
            - mountPath: /var/lib/mysql
              name: mysql-cluster-master-data
      volumes:
        - name: mysql-cluster-master-data
          persistentVolumeClaim:
            claimName: mysql-cluster-master-data-pvc
# 与上面的 StatefulSet 同处一个 YAML 流,缺少 --- 分隔符会导致解析失败
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  namespace: mysql-cluster
  name: mysql-cluster-master-data-pvc
spec:
  storageClassName: nfs-client
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "500Gi"

kind: Secret
apiVersion: v1
metadata:
  name: mysql-cluster-master-secret
  namespace: mysql-cluster
type: Opaque
data:
  # Secret 的 data 值必须是 base64 编码;原来为空,下面的 Job 读取 /opt/passwd 会拿到空密码。
  # MTIzNDU2 即 base64("123456"),与上文 MYSQL_ROOT_PASSWORD 一致
  passwd: MTIzNDU2
---
kind: Job
apiVersion: batch/v1
metadata:
  namespace: mysql-cluster
  name: mysql-cluster-master-create-user-job
spec:
  template:
    metadata:
      labels:
        app: mysql-cluster
        deploy: mysql-cluster-master-job
    spec:
      imagePullSecrets:
        - name: "myregistrykey"
      restartPolicy: OnFailure
      containers:
        - name: mysql
          image: registry.cn-hangzhou.aliyuncs.com/alvinos/mysql-master:v1
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
          command:
            - "/bin/bash"
            - "-c"
            - |
              # 通过命令替换读取挂载的密码文件;
              # 原写法 "-pcat /opt/passwd" 会把字面量 "cat" 当作密码传给 mysql
              /usr/bin/mysql -hmysql-cluster-master-svc.mysql-cluster.svc.cluster.local -uroot -p"$(cat /opt/passwd)" -e "GRANT REPLICATION SLAVE ON *.* TO 'mysql'@'%' IDENTIFIED BY 'mysql';FLUSH PRIVILEGES;"

          volumeMounts:
            - mountPath: /opt
              name: mysql-cluster-master-secret
      volumes:
        - name: mysql-cluster-master-secret
          secret:
            secretName: mysql-cluster-master-secret
            items:
              - key: passwd
                path: passwd

特殊存储卷

1、emptyDir

2、hostPath

3、pv/pvc

StorageClass

根据pvc的要求,去自动创建符合要求的pv。

1、按照pvc创建pv

2、减少资源浪费

1、部署存储类

https://github.com/helm/helm

# 下载helm(helm相当于kubernetes中的yum)
[root@k8s-m-01 ~]# wget https://get.helm.sh/helm-v3.5.3-linux-amd64.tar.gz

[root@k8s-m-01 ~]# tar -xf helm-v3.5.3-linux-amd64.tar.gz 
[root@k8s-m-01 ~]# cd linux-amd64/
[root@k8s-m-01 ~]# for i in m1 m2 m3;do scp helm root@$i:/usr/local/bin/; done

# 测试安装
[root@k8s-m-01 ~]# helm 
The Kubernetes package manager

Common actions for Helm:

- helm search:    search for charts
- helm pull:      download a chart to your local directory to view
- helm install:   upload the chart to Kubernetes
- helm list:      list releases of charts

# 安装存储类

## 安装一个helm的存储库
[root@k8s-m-01 ~]# helm repo add ckotzbauer https://ckotzbauer.github.io/helm-charts
"ckotzbauer" has been added to your repositories
[root@k8s-m-01 ~]# helm repo list
NAME        URL                                     
ckotzbauer  https://ckotzbauer.github.io/helm-charts

## 第一种方式:部署nfs客户端及存储类
[root@k8s-m-01 ~]# helm install nfs-client --set nfs.server=172.16.1.51 --set nfs.path=/nfs/v6  ckotzbauer/nfs-client-provisioner
NAME: nfs-client
LAST DEPLOYED: Fri Apr  9 09:33:23 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None

## 查看部署结果
[root@k8s-m-01 ~]# kubectl get pods 
NAME                                                 READY   STATUS        RESTARTS   AGE
nfs-client-nfs-client-provisioner-56dddf479f-h9qqb   1/1     Running       0          41s

[root@k8s-m-01 ~]# kubectl get storageclasses.storage.k8s.io 
NAME         PROVISIONER                                       RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-client   cluster.local/nfs-client-nfs-client-provisioner   Delete          Immediate           true                   61s

## 第二种方式:直接使用配置清单(推荐)
accessModes: ReadWriteMany

### 下载包
[root@k8s-m-01 /opt]# helm pull ckotzbauer/nfs-client-provisioner

### 解压
[root@k8s-m-01 /opt]# tar -xf nfs-client-provisioner-1.0.2.tgz 

### 修改values.yaml
[root@k8s-m-01 /opt]# cd nfs-client-provisioner/
[root@k8s-m-01 /opt/nfs-client-provisioner]# vim values.yaml 
nfs:
  server: 172.16.1.51
  path: /nfs/v6
storageClass:
  accessModes: ReadWriteMany
  reclaimPolicy: Retain

### 安装
[root@k8s-m-01 /opt/nfs-client-provisioner]# helm install nfs-client ./
NAME: nfs-client
LAST DEPLOYED: Fri Apr  9 09:45:47 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None

# 测试存储类
## 创建pvc
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-discuz-pvc
spec:
  storageClassName: nfs-client
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "18Gi"

## 查看pv/pvc
[root@k8s-m-01 /opt/discuz]# kubectl get pv,pvc
NAME                                                        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS     CLAIM                      STORAGECLASS   REASON   AGE
persistentvolume/pvc-589b3377-40cf-4f83-ab06-33bbad83013b   18Gi       RWX            Retain           Bound      default/pv-discuz-pvc-sc   nfs-client              2m35s

NAME                                     STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
persistentvolumeclaim/pv-discuz-pvc-sc   Bound    pvc-589b3377-40cf-4f83-ab06-33bbad83013b   18Gi       RWX            nfs-client     2m35s

## 利用存储类部署一个discuz
#########################################################################################
#  1、部署MySQL集群
#     1、创建命名空间
#     2、创建service提供负载均衡
#     3、使用控制器部署MySQL实例
###
#  2、部署Discuz应用
#     1、创建命名空间
#     2、创建Service提供负载均衡(Headless Service)
#     3、创建服务并挂载代码
#     4、创建Ingress,用于域名转发(https)
###
#  3、服务之间的互连
#     1、Discuz连接MySQL  --->  mysql.mysql.svc.cluster.local
#########################################################################################
apiVersion: v1
kind: Namespace
metadata:
  name: mysql
---
kind: Service
apiVersion: v1
metadata:
  name: mysql-svc
  namespace: mysql
spec:
  ports:
    - port: 3306
      targetPort: 3306
      name: mysql
      protocol: TCP
  selector:
    app: mysql
    deploy: discuz
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-mysql-pvc
  namespace: mysql
spec:
  storageClassName: nfs-client
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "20Gi"

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql-deployment
  namespace: mysql
spec:
  selector:
    matchLabels:
      app: mysql
      deploy: discuz
  template:
    metadata:
      labels:
        app: mysql
        deploy: discuz
    spec:
      nodeName: k8s-m-02
      containers:
        - name: mysql
          image: mysql:5.7
          livenessProbe:
            tcpSocket:
              port: 3306
          readinessProbe:
            tcpSocket:
              port: 3306
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
            - name: MYSQL_DATABASE
              value: "discuz"
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: mysql-data
      volumes:
        - name: mysql-data
          persistentVolumeClaim:
            claimName: pv-mysql-pvc
---
kind: Namespace
apiVersion: v1
metadata:
  name: discuz
---
kind: Service
apiVersion: v1
metadata:
  name: discuz-svc
  namespace: discuz
spec:
  clusterIP: None
  ports:
    - port: 80
      targetPort: 80
      name: http
  selector:
    app: discuz
    deploy: discuz
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-discuz-pvc
  namespace: discuz
spec:
  storageClassName: nfs-client
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "18Gi"
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: discuz-deployment
  namespace: discuz
spec:
  replicas: 5
  selector:
    matchLabels:
      app: discuz
      deploy: discuz
  template:
    metadata:
      labels:
        app: discuz
        deploy: discuz
    spec:
      nodeName: k8s-m-03
      containers:
        - name: php
          image: alvinos/php:wordpress-v2
          livenessProbe:
            tcpSocket:
              port: 9000
          readinessProbe:
            tcpSocket:
              port: 9000
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: discuz-data
        - name: nginx
          image: alvinos/nginx:wordpress-v2
          livenessProbe:
            httpGet:
              port: 80
              path: /
          readinessProbe:
            httpGet:
              port: 80
              path: /
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: discuz-data
      volumes:
        - name: discuz-data
          persistentVolumeClaim:
            claimName: pv-discuz-pvc
---
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: discuz-ingress
  namespace: discuz
spec:
  tls:
    - hosts:
        - www.discuz.cluster.local.com
      secretName: discuz-secret
  rules:
    - host: www.discuz.cluster.local.com
      http:
        paths:
          - backend:
              serviceName: discuz-svc
              servicePort: 80

3、配置中心

configMap:配置中心

# 创建configmap

## 第一种方式:使用配置清单
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
  default.conf: |
    server {
        listen       80;
        listen  [::]:80;
        server_name  _;
        location / {
            root   /usr/share/nginx/html;
            index  index.html index.php;
        }
        location ~ \.php$ {
            root           /usr/share/nginx/html;
            fastcgi_pass   127.0.0.1:9000;
            fastcgi_index  index.php;
            fastcgi_param  SCRIPT_FILENAME  /usr/share/nginx/html$fastcgi_script_name;
            include        fastcgi_params;
        }
    }

## 第二种方式:部署目录
kubectl create configmap nginx-tls  --from-file=./

## 第三种方式:部署文件
[root@k8s-m-01 ~/zs/Nginx]# kubectl create configmap nginx-tls-crt  --from-file=tls.crt 

# 使用configmap
## 使用挂载的方式,将配置文件挂载到容器中
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
  default.conf: |
    server {
        listen       80;
        listen  [::]:80;
        server_name  _;
        location / {
            root   /usr/share/nginx/html;
            index  index.html index.php;
        }
        location ~ \.php$ {
            root           /usr/share/nginx/html;
            fastcgi_pass   127.0.0.1:9000;
            fastcgi_index  index.php;
            fastcgi_param  SCRIPT_FILENAME  /usr/share/nginx/html$fastcgi_script_name;
            include        fastcgi_params;
        }
    }
---
kind: Service
apiVersion: v1
metadata:
  name: nginx-config
spec:
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30089
  selector:
    app: nginx-config
  type: NodePort
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-config
spec:
  storageClassName: nfs-client
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "18Gi"
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nginx-config
spec:
  selector:
    matchLabels:
      app: nginx-config
  template:
    metadata:
      labels:
        app: nginx-config
    spec:
      containers:
        - name: php
          image: alvinos/php:wordpress-v2
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: nginx-config

        - name: nginx
          image: alvinos/nginx:wordpress-v2
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: nginx-config

            - mountPath: /etc/nginx/conf.d
              name: nginx-config-configmap

      volumes:
        - name: nginx-config
          persistentVolumeClaim:
            claimName: nginx-config

        - name: nginx-config-configmap
          configMap:
            name: nginx-config
            items:
              - key: default.conf
                path: default.conf
              # items.path 必须是相对路径(原来的 "/etc" 是绝对路径,kubelet 会拒绝挂载);
              # NOTE(review): key tls.crt 需存在于 nginx-config 这个 ConfigMap 的 data 中,
              # 上文清单里只有 default.conf —— 待确认
              - key: tls.crt
                path: tls.crt

# configmap热更新
## 修改configmap中的文件,可以同步到所有的挂载此configmap的容器中(仅仅同步到容器中),但是如果使用subPath参数,则热更新失效。

## configMap挂载会直接覆盖原来的目录,如果不覆盖则需要使用subPath参数(subPath参数只能够针对文件,同时不支持热更新)

apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
  default.conf: |
    server {
        listen       80;
        listen  [::]:80;
        server_name  _;
        location / {
            root   /usr/share/nginx/html;
            index  index.html index.php;
        }
        location ~ \.php$ {
            root           /usr/share/nginx/html;
            fastcgi_pass   127.0.0.1:9000;
            fastcgi_index  index.php;
            fastcgi_param  SCRIPT_FILENAME  /usr/share/nginx/html$fastcgi_script_name;
            include        fastcgi_params;
        }
    }
  index.php: |
    <?php

    phpinfo();

    ?>
---
kind: Service
apiVersion: v1
metadata:
  name: nginx-config
spec:
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30089
  selector:
    app: nginx-config
  type: NodePort
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nginx-config
spec:
  selector:
    matchLabels:
      app: nginx-config
  template:
    metadata:
      labels:
        app: nginx-config
    spec:
      containers:
        - name: php
          image: alvinos/php:wordpress-v2
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: nginx-config-configmap

        - name: nginx
          image: alvinos/nginx:wordpress-v2
          volumeMounts:
            - mountPath: /usr/share/nginx/html/index.php
              name: nginx-config-configmap
              subPath: index.php

            - mountPath: /etc/nginx/conf.d
              name: nginx-config-configmap
      volumes:
        - name: nginx-config-configmap
          configMap:
            name: nginx-config
            items:
              - key: index.php
                path: index.php

4、Secret

Secret用来保存敏感数据,保存之前就必须将文件进行base64加密,挂载到pod中,自动解密。

Secret类型:

    tls: 一般用来部署证书
    Opaque : 一般用来部署密码
    Service Account : 部署kubernetes API认证信息
    kubernetes.io/dockerconfigjson : 部署容器仓库登录信息

apiVersion: v1
kind: Secret
metadata:
  name: test
data:
  name: b2xkYm95Cg==

数据持久化

1、回调钩子

1、PostStart : 启动回调钩子,是在容器启动之后立即执行

2、PreStop : 结束回调钩子,是在容器结束之前立即执行

kind: Deployment
apiVersion: apps/v1
metadata:
  name: lifecycle
spec:
  selector:
    matchLabels:
      app: cycle
  template:
    metadata:
      labels:
        app: cycle
    spec:
      nodeName: k8s-m-01
      containers:
        - name: nginx
          image: nginx
          volumeMounts:
            - mountPath: /usr/share/nginx/html/
              name: lifecycle-data
          lifecycle:
            postStart:
              exec:
                command:
                  - "/bin/bash"
                  - "-c"
                  - "echo 'This is Lifecycle' > /usr/share/nginx/html/index.html"
            preStop:
              exec:
                command:
                  - "/bin/bash"
                  - "-c"
                  - "echo 'This is Lifecycle preStop' >> /usr/share/nginx/html/index.html"
      volumes:
        - name: lifecycle-data
          hostPath:
            path: /opt/discuz/data

2、数据持久化

1、emptyDir : 是pod调度到节点上时创建的一个空目录,当pod被删除时,emptyDir中的数据也随即被删除,emptyDir常用于容器间分享文件,或者用于创建临时目录。

注:emptyDir不能够用来做数据持久化

kind: Deployment
apiVersion: apps/v1
metadata:
  name: emptydir
spec:
  selector:
    matchLabels:
      app: emptydir
  template:
    metadata:
      labels:
        app: emptydir
    spec:
      containers:
        - name: nginx
          image: nginx
          volumeMounts:
            - mountPath: /usr/share/nginx/nginx
              name: test-emptydir
        - name: mysql
          image: mysql:5.7
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
          volumeMounts:
            - mountPath: /usr/share/nginx
              name: test-emptydir

      volumes:
        - name: test-emptydir
          emptyDir: {}

2、hostPath
    hostPath类似于docker -v参数,将宿主主机中的文件挂载pod中,但是hostPath比docker -v参数更强大,(Pod调度到哪个节点,则直接挂载到当前节点上)

kind: Deployment
apiVersion: apps/v1
metadata:
  name: hostpath
spec:
  selector:
    matchLabels:
      app: hostpath
  template:
    metadata:
      labels:
        app: hostpath
    spec:
      nodeName: k8s-m-01
      containers:
        - name: nginx
          image: nginx
          volumeMounts:
            - mountPath: /usr/share/nginx/nginx
              name: test-hostpath
        - name: mysql
          image: mysql:5.7
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
          volumeMounts:
            - mountPath: /usr/share/nginx
              name: test-hostpath

      volumes:
        - name: test-hostpath
          hostPath:
            path: /opt/hostpath
            type: DirectoryOrCreate

3、pv/PVC

disk  --->  100G    k8s-m-01
    pv1 ---> 50G
    pv2 ---> 30G
    pv3 ---> 20G

pod   --->  10G     k8s-m-02

    1. 在所有节点上安装nfs
        yum install nfs-utils.x86_64 -y

    2. 配置
    [root@kubernetes-master-01 nfs]# mkdir -p /nfs/v{1..10}
    [root@kubernetes-master-01 nfs]# cat > /etc/exports <<EOF
    /nfs/v1  172.16.1.0/24(rw,no_root_squash)
    /nfs/v2  172.16.1.0/24(rw,no_root_squash)
    /nfs/v3  172.16.1.0/24(rw,no_root_squash)
    /nfs/v4  172.16.1.0/24(rw,no_root_squash)
    /nfs/v5  172.16.1.0/24(rw,no_root_squash)
    /nfs/v6  172.16.1.0/24(rw,no_root_squash)
    /nfs/v7  172.16.1.0/24(rw,no_root_squash)
    /nfs/v8  172.16.1.0/24(rw,no_root_squash)
    /nfs/v9  172.16.1.0/24(rw,no_root_squash)
    /nfs/v10 172.16.1.0/24(rw,no_root_squash)
    EOF
    [root@k8s-m-01 /nfs]# exportfs -arv
    exporting 172.16.0.0/16:/nfs/v10
    exporting 172.16.0.0/16:/nfs/v9
    exporting 172.16.0.0/16:/nfs/v8
    exporting 172.16.0.0/16:/nfs/v7
    exporting 172.16.0.0/16:/nfs/v6
    exporting 172.16.0.0/16:/nfs/v5
    exporting 172.16.0.0/16:/nfs/v4
    exporting 172.16.0.0/16:/nfs/v3
    exporting 172.16.0.0/16:/nfs/v2
    exporting 172.16.0.0/16:/nfs/v1
    [root@k8s-m-01 /nfs]# showmount -e
    Export list for k8s-m-01:
    /nfs/v10 172.16.0.0/16
    /nfs/v9  172.16.0.0/16
    /nfs/v8  172.16.0.0/16
    /nfs/v7  172.16.0.0/16
    /nfs/v6  172.16.0.0/16
    /nfs/v5  172.16.0.0/16
    /nfs/v4  172.16.0.0/16
    /nfs/v3  172.16.0.0/16
    /nfs/v2  172.16.0.0/16
    /nfs/v1  172.16.0.0/16

    3.测试k8s使用nfs
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs
spec:
  selector:
    matchLabels:
      app: nfs

  template:
    metadata:
      labels:
        app: nfs
    spec:
      nodeName: k8s-m-02
      containers:
        - name: mysql
          image: mysql:5.7
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: nfs
      volumes:
        - name: nfs
          nfs:
            path: /nfs/v1
            server: 172.16.1.51
    4. 使用pv/pvc来管理nfs
#########################################################################################
#  1、部署MySQL集群
#     1、创建命名空间
#     2、创建service提供负载均衡
#     3、使用控制器部署MySQL实例
###
#  2、部署Discuz应用
#     1、创建命名空间
#     2、创建Service提供负载均衡(Headless Service)
#     3、创建服务并挂载代码
#     4、创建Ingress,用于域名转发(https)
###
#  3、服务之间的互连
#     1、Discuz连接MySQL  --->  mysql.mysql.svc.cluster.local
#########################################################################################

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-mysql
spec:
  nfs:
    path: /nfs/v3
    server: 172.16.1.51
  capacity:
    storage: 20Gi
  persistentVolumeReclaimPolicy: Retain
  accessModes:
    - "ReadWriteOnce"
    - "ReadWriteMany"
---
apiVersion: v1
kind: Namespace
metadata:
  name: mysql
---
kind: Service
apiVersion: v1
metadata:
  name: mysql-svc
  namespace: mysql
spec:
  ports:
    - port: 3306
      targetPort: 3306
      name: mysql
      protocol: TCP
  selector:
    app: mysql
    deploy: discuz
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-mysql-pvc
  namespace: mysql
spec:
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "20Gi"

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql-deployment
  namespace: mysql
spec:
  selector:
    matchLabels:
      app: mysql
      deploy: discuz
  template:
    metadata:
      labels:
        app: mysql
        deploy: discuz
    spec:
      nodeName: k8s-m-02
      containers:
        - name: mysql
          image: mysql:5.7
          livenessProbe:
            tcpSocket:
              port: 3306
          readinessProbe:
            tcpSocket:
              port: 3306
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
            - name: MYSQL_DATABASE
              value: "discuz"
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: mysql-data
      volumes:
        - name: mysql-data
          persistentVolumeClaim:
            claimName: pv-mysql-pvc
---
kind: Namespace
apiVersion: v1
metadata:
  name: discuz
---
kind: Service
apiVersion: v1
metadata:
  name: discuz-svc
  namespace: discuz
spec:
  clusterIP: None
  ports:
    - port: 80
      targetPort: 80
      name: http
  selector:
    app: discuz
    deploy: discuz
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-discuz
spec:
  nfs:
    path: /nfs/v4
    server: 172.16.1.51
  capacity:
    storage: 20Gi
  persistentVolumeReclaimPolicy: Retain
  accessModes:
    - "ReadWriteOnce"
    - "ReadWriteMany"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-discuz-pvc
  namespace: discuz
spec:
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "18Gi"
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: discuz-deployment
  namespace: discuz
spec:
  replicas: 5
  selector:
    matchLabels:
      app: discuz
      deploy: discuz
  template:
    metadata:
      labels:
        app: discuz
        deploy: discuz
    spec:
      nodeName: k8s-m-03
      containers:
        - name: php
          image: alvinos/php:wordpress-v2
          livenessProbe:
            tcpSocket:
              port: 9000
          readinessProbe:
            tcpSocket:
              port: 9000
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: discuz-data
        - name: nginx
          image: alvinos/nginx:wordpress-v2
          livenessProbe:
            httpGet:
              port: 80
              path: /
          readinessProbe:
            httpGet:
              port: 80
              path: /
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: discuz-data
      volumes:
        - name: discuz-data
          persistentVolumeClaim:
            claimName: pv-discuz-pvc
---
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: discuz-ingress
  namespace: discuz
spec:
  tls:
    - hosts:
        - www.discuz.cluster.local.com
      secretName: discuz-secret
  rules:
    - host: www.discuz.cluster.local.com
      http:
        paths:
          - backend:
              serviceName: discuz-svc
              servicePort: 80

健康检查

健康检查

怎样保证pod中的容器正常启动?

怎样保证pod中容器能够正常对外提供服务?

只有容器启动了并且能够正常对外提供服务了,才能放到负载均衡上供给用户访问

1、检查pod中容器是否能够正常启动

pod中所有容器的status=Running时,Pod的状态才会是Running状态。

当存活性检查检测失败的时候,kubelet会删除容器,重新启动一个新的容器。继续检查。
kind: Service
apiVersion: v1
metadata:
  name: name-mysql
spec:
  ports:
    - name: http
      port: 80
      targetPort: 80
  selector:
    app: mysql
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: name-mysql
spec:
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: alvinos/django:v1
          livenessProbe:
            exec:
              command:
                - cat
                - /root/test/manage.py

# httpGet检查
kind: Service
apiVersion: v1
metadata:
  name: name-mysql
spec:
  ports:
    - name: http
      port: 80
      targetPort: 80
  selector:
    app: mysql
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: name-mysql
spec:
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: alvinos/django:v1
          livenessProbe:
            httpGet:
              port: 80
              path: /index

# TcpSocket 相当于 ping
---
kind: Service
apiVersion: v1
metadata:
  name: name-mysql
spec:
  ports:
    - name: http
      port: 80
      targetPort: 80
  selector:
    app: mysql
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: name-mysql
spec:
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: alvinos/django:v1
          livenessProbe:
            tcpSocket: 
              port: 80 

1.1、监控检查参数

apiVersion: apps/v1
kind: Deployment
metadata:
  name: name-mysql
spec:
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: alvinos/django:v1
          livenessProbe:
            tcpSocket: 
              port: 80 

# 检查失败最少测试,默认:3

delay=10s   : 探测延时时间initialDelaySeconds
timeout=1s  :探测的超时时间
period=10s  :探测的频率
success=1   :成功多少次才算成功
failure=1   :失败多少次才算失败

failureThreshold:最少连续几次探测失败的次数,满足该次数则认为fail
initialDelaySeconds:容器启动之后开始进行存活性探测的秒数。不填立即进行
periodSeconds:执行探测的频率(秒)。默认为10秒。最小值为1。
successThreshold:探测失败后,最少连续探测成功多少次才被认定为成功,满足该次数则认为success。(但是如果是liveness则必须是 1。最小值是 1。)
timeoutSeconds:每次执行探测的超时时间,默认1秒,最小1秒。

2、检查pod中容器是否能够正常对外提供服务

# 就绪性探测的特点是探测失败,立即移出负载均衡(endpoints ---> NotReadyAddresses)

---
kind: Service
apiVersion: v1
metadata:
  name: readnessprobe
spec:
  ports:
    - name: http
      port: 80
      targetPort: 80
  selector:
    app: readnessprobe
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: readnessprobe
spec:
  selector:
    matchLabels:
      app: readnessprobe
  template:
    metadata:
      labels:
        app: readnessprobe
    spec:
      containers:
        - name: readnessprobe
          image: alvinos/django:v1
          # 正确字段名为 readinessProbe;原来的 readnessProbe 是拼写错误,
          # kubectl apply 会报 unknown field
          readinessProbe:
            exec:
              command:
                - cat
                - /root/test/manage.py

总结

存活性探测:探测失败,立即删除容器
就绪性探测:探测失败,立即移除负载均衡

Jenkins

Jenkins

1、安装Jenkins

1、安装Java
yum install java-1.8.0-openjdk* -y

2、下载Jenkins安装包
wget https://mirrors.tuna.tsinghua.edu.cn/jenkins/redhat-stable/jenkins-2.249.1-1.1.noarch.rpm

3、安装认证Key
rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key

4、安装
yum install jenkins-2.249.1-1.1.noarch.rpm

5、启动
systemctl enable --now jenkins

6、安装插件
[root@localhost ~]# tar -zxvf /opt/plugins.tar.gz -C /var/lib/jenkins/
[root@localhost ~]# systemctl restart jenkins

7、处理admin密码丢失问题
[root@localhost admin_7050982324762688703]# vim /var/lib/jenkins/users/admin_7050982324762688703/config.xml 
<passwordHash>#jbcrypt:$2a$10$CEFbiUohDtWimNh4o3TBje2EEXgljqA/frbwED0Go5X533dd.jk6W</passwordHash>
替换成
<passwordHash>#jbcrypt:$2a$10$MiIVR0rr/UhQBqT.bBq0QehTiQVqgNpUGyWW2nJObaVAM/2xSQdSq</passwordHash>

8、安装git
[root@localhost ~]# yum install git -y

2、用户权限

  • 创建用户
系统管理 ---> 用户管理 ---> 新建用户

1618451136706

  • 创建权限组

使用权限组,需要安装Role-Based Strategy权限插件

1、 启用授权策略插件
    系统管理 ---> 全局安全配置 ---> 授权策略 ---> Role-Based Strategy

2、创建及管理权限组
    系统管理 ---> Manage and Assign Roles ---> Manage Roles ---> 各个角色(权限组)

1618451538753

1618451804102

3、分配角色
    系统管理 ---> Manage and Assign Roles ---> Assign Roles

4、使用通配符
    .* : 匹配以什么开头的项目

2、凭证管理

系统管理 --->  Manage Credentials ---> 系统凭证 ---> 全局凭证 ---> 添加凭证
  • http
系统管理 --->  Manage Credentials ---> 系统凭证 ---> 全局凭证 ---> 添加凭证 ---> Username with Password
  • ssh
系统管理 --->  Manage Credentials ---> 系统凭证 ---> 全局凭证 ---> 添加凭证 ---> ssh username with private key
  • 登录服务器ssh

1618456404001

1618459316118

1618459419128

GitLab

GitLab

官网:https://about.gitlab.com/

Gitlab是依赖于Git的远程代码仓库,类似于GitHub、Gitee,不同的是GitHub、Gitee的公网上代码仓库, Gitlab是可以私有化部署的免费远程代码仓库

1、部署GitLab

1、准备服务器
    192.168.15.60   gitlab  

2、下载安装包
wget https://mirrors.tuna.tsinghua.edu.cn/gitlab-ce/yum/el7/gitlab-ce-13.0.3-ce.0.el7.x86_64.rpm

3、安装GitLab
# 安装依赖包
[root@gitlab /opt]# yum install -y curl policycoreutils-python openssh-server perl
# 关闭防火墙
[root@gitlab /opt]# systemctl disable --now firewalld
# 关闭selinux
[root@gitlab /opt]# sed -i 's#enforcing#disabled#g' /etc/sysconfig/selinux
# 临时关闭
[root@gitlab /opt]# setenforce 0
# 安装
[root@gitlab /opt]# yum install gitlab-ce-13.0.3-ce.0.el7.x86_64.rpm 
# 修改配置文件
[root@sean ~]# vim /etc/gitlab/gitlab.rb
external_url 'http://192.168.15.60'
nginx['listen_port'] = 80
# 刷新配置(默认启动)
gitlab-ctl reconfigure
1.Guest:可以创建issue、发表评论,不能读写版本库 
2.Reporter:可以克隆代码,不能提交,QA、PM 可以赋予这个权限 
3.Developer:可以克隆代码、开发、提交、push,普通开发可以赋予这个权限 
4.Maintainer:可以创建项目、添加tag、保护分支、添加项目成员、编辑项目,核心开发可以赋予这个 权限 
5.Owner:可以设置项目访问权限 - Visibility Level、删除项目、迁移项目、管理组成员,开发组组 长可以赋予这个权限

一、组件介绍

1、nginx: 静态web服务器在这里插入代码片

2、gitlab-shell: 用于处理Git命令和修改authorized keys列表
3、gitlab-workhorse: 轻量级的反向代理服务器,可以处理一些大的HTTP请求(磁盘上的 CSS、JS 文件、文件上传下载等),处理 Git Push/Pull 请求,处理到Rails 的连接会反向代理给后端的unicorn(修改由 Rails 发送的响应或发送给 Rails 的请求,管理 Rails 的长期 WebSocket 连接等)。

4、logrotate:日志文件管理工具

5、postgresql:repository 中的数据(元数据,issue,合并请求 merge request 等 , 可以登录 Web 的用户

6、redis:缓存每个客户端的sessions和后台队列,负责分发任务。Redis需求的存储空间很小,大约每个用户25KB

7、sidekiq:用于在后台执行队列任务(异步执行)

8、unicorn:Gitlab 自身的 Web 服务器(Ruby Web Server),包含了 Gitlab 主进程,负责处理快速/一般任务,与 Redis 一起工作,配置参考:CPU核心数 + 1 = unicorn workers数量。
    1. 通过检查存储在 Redis 中的用户会话来检查权限
    2. 为 Sidekiq 制作任务
    3. 从仓库(warehouse)取东西或在那里移动东西

9、gitlab-shell:用于 SSH 交互,而不是 HTTP。gitlab-shell 通过 Redis 与 Sidekiq 进行通信,并直接或通过 TCP 间接访问 Unicorn。用于处理Git命令和修改authorized keys列表

10、Gitaly:后台服务,专门负责访问磁盘以高效处理 gitlab-shell 和 gitlab-workhorse 的git 操作,并缓存耗时操作。所有的 git 操作都通过 Gitaly 处理,并向 GitLab web 应用程序提供一个 API,以从 git(例如 title, branches, tags, other meta data)获取属性,并获取 blob(例如 diffs,commits,files)

11、Sidekiq:后台核心服务,可以从redis队列中提取作业并对其进行处理。后台作业允许GitLab通过将工作移至后台来提供更快的请求/响应周期。Sidekiq任务需要来自Redis

12、prometheus:提供监控

二、常用命令

1、服务控制命令

1)启动/停止/重启所有 gitlab 组件:

gitlab-ctl start/stop/restart

2)启动指定模块组件:

 gitlab-ctl start redis/postgresql/gitlab-workhorse/logrotate/nginx/sidekiq/unicorn

3)停止指定模块组件:

gitlab-ctl stop 模块名

4)查看服务状态

gitlab-ctl status

5)生成配置并启动服务

 gitlab-ctl reconfigure

2、其他常用管理命令

1)查看版本

cat /opt/gitlab/embedded/service/gitlab-rails/VERSION 

2)检查gitlab

gitlab-rake gitlab:check SANITIZE=true --trace

3)实时查看日志

gitlab-ctl tail

4)关系数据库升级

gitlab-rake db:migrate

5)清理redis缓存

gitlab-rake cache:clear

6)升级gitlab-ce版本

# 关闭gitlab服务
gitlab-ctl stop unicorn
gitlab-ctl stop sidekiq
gitlab-ctl stop nginx
# 备份gitlab
gitlab-rake gitlab:backup:create
# 升级rpm包
rpm -Uvh gitlab-ce-xxx.rpm
# 启动并查看gitlab版本信息
gitlab-ctl reconfigure
gitlab-ctl restart
head -1 /opt/gitlab/version-manifest.txt

# 常见报错
Error executing action `run` on resource 'ruby_block[directory resource: /var/opt/gitlab/git-data/repositories]'
# 解决方法:
sudo chmod 2770 /var/opt/gitlab/git-data/repositories

7)升级postgreSQL最新版本

gitlab-ctl pg-upgrade

3、日志

1)实时查看所有日志

gitlab-ctl tail

2)实时查看某个组件日志

gitlab-ctl tail [组件名称]

4、常用配置文件目录

主配置文件: /etc/gitlab/gitlab.rb
文档根目录: /opt/gitlab
默认存储库位置: /var/opt/gitlab/git-data/repositories
Nginx配置文件: /var/opt/gitlab/nginx/conf/gitlab-http.conf
Postgresql数据目录: /var/opt/gitlab/postgresql/data

5、gitlab管理员密码重置

[root@test bin]# gitlab-rails console production
-------------------------------------------------------------------------------------
 GitLab:       11.10.4 (62c464651d2)
 GitLab Shell: 9.0.0
 PostgreSQL:   9.6.11
-------------------------------------------------------------------------------------
Loading production environment (Rails 5.0.7.2)
irb(main):001:0> user = User.where(id:1).first
=> #<User id:1 @root>
irb(main):002:0> user.password = 'qwer1234'
=> "qwer1234"
irb(main):003:0> user.password_confirmation = 'qwer1234'
=> "qwer1234"
irb(main):004:0> user.save
Enqueued ActionMailer::DeliveryJob (Job ID: 4752a4a4-4e85-4e8b-9f27-72788abfe97c) to Sidekiq(mailers) with arguments: "DeviseMailer", "password_change", "deliver_now", #<GlobalID:0x00007f519e7501d8 @uri=#<URI::GID gid://gitlab/User/1>>
=> true
irb(main):005:0> exit

6、使用smtp来发送邮件通知

vim /etc/gitlab/gitlab.rb

 gitlab_rails['smtp_address'] = "smtp.yourdomain.com"
 gitlab_rails['smtp_port'] = 25
 gitlab_rails['smtp_user_name'] = "xxx"
 gitlab_rails['smtp_password'] = "xxx"
 gitlab_rails['smtp_domain'] = "smtp.yourdomain.com" 
 gitlab_rails['smtp_authentication'] = 'plain'
 gitlab_rails['smtp_enable_starttls_auto'] = true

7、配置gitlab访问模式为https

# 创建ssl证书存放目录
    mkdir -p /etc/gitlab/ssl
    chmod 0700 /etc/gitlab/ssl

# 上传证书,修改证书访问权限
    chmod 600 /etc/gitlab/ssl/gitlab.xxx.com.crt

# 修改主配置,支持ssl访问
    vim /etc/gitlab/gitlab.rb
    external_url "[https://gitlab.bjwf125.com]            (https://gitlab.bjwf125.com)"
    nginx['redirect_http_to_https'] = true
    nginx['ssl_certificate'] = "/etc/gitlab/ssl/gitlab.xxx.com.crt"
    nginx['ssl_certificate_key'] = "/etc/gitlab/ssl/gitlab.xxx.com.key"

# 重启
    gitlab-ctl reconfigure

# 开启防火墙

  firewall-cmd --zone=public --add-port=443/tcp --permanent
  firewall-cmd --reload

8、Gitlab备份

gitlab备份的默认目录是/var/opt/gitlab/backups

1)备份命令

gitlab-rake gitlab:backup:create
#该命令会在备份目录(默认:/var/opt/gitlab/backups/)下创建一个tar压缩包xxxxxxxx_gitlab_backup.tar,其中开头的xxxxxx是备份创建的时间戳,这个压缩包包括GitLab整个的完整部分。

2)自动备份

#通过任务计划crontab 实现自动备份
0 2 * * * /usr/bin/gitlab-rake gitlab:backup:create 
# 每天两点备份gitlab数据

3)备份配置文件

vim /etc/gitlab/gitlab.rb
gitlab_rails['manage_backup_path'] = true
gitlab_rails['backup_path'] = "/data/gitlab/backups"    #指定gitlab备份目录
gitlab_rails['backup_archive_permissions'] = 0644       #生成的备份文件权限
gitlab_rails['backup_keep_time'] = 7776000              #备份保留天数为3个月(即90天,这里是7776000秒)

#重载生效
gitlab-ctl reconfigure

9、备份恢复

gitlab-ctl stop unicorn
gitlab-ctl stop sidekiq
gitlab-rake gitlab:backup:restore BACKUP=xxxxxx(恢复文件)
gitlab-ctl start 启动gitlab
  • 修改权限

git

1、介绍

1、将代码上传到服务器,然后解压

2、将代码上传到服务器中,然后通过软连接的方式部署

无法实现批量部署,无法实现批量回滚,更无法实现批量测试。

# 批量部署,批量测试

源代码管理

协同开发

代码管理工具

自动集成(java、golang)、自动部署、自动测试: DevOps

代码:
    编译型语言:需要编译,后运行
    脚本型语言:直接运行

工具:
实现自动集成、自动部署:Jenkins
代码管理:git ---> gitlab、github、gitee

1、 软件开发生命周期

1、设计 ---> 需求分析

2、实现
    写配置清单,构建容器,部署一个测试环境

3、测试

4、优化(迭代)

代码管理工具

代码管理工具:git

1、安装Git

[root@alvin-test-os deploy2]# yum install git -y
[root@alvin-test-os deploy2]# git --version
git version 1.8.3.1

2、初始化代码仓库

[root@alvin-test-os data]# git init
Initialized empty Git repository in /root/data/.git/
[root@alvin-test-os data]# ll -a
total 4
drwxr-xr-x. 3 root root   18 Apr 13 10:35 .
dr-xr-x---. 8 root root 4096 Apr 13 10:32 ..
drwxr-xr-x. 7 root root  119 Apr 13 10:35 .git

3、将代码提交到Git暂存区

[root@alvin-test-os data]# git add index.html

4、文件提交到git缓冲区

[root@alvin-test-os data]# git commit -m 'init' .

参数:
    -m : 添加注释

5、回滚

[root@alvin-test-os data]# cat index.php 
<?php

phpinfo();
[root@alvin-test-os data]# git reset --hard 180d937cda0a4caaa1683753a84af166399da1f5
HEAD is now at 180d937 init
[root@alvin-test-os data]# cat index.php 
1111

6、远程仓库

# github   gitee   gitlab

# 下载远程仓库
[root@alvin-test-os ~]# git clone https://gitee.com/chenyang0910/data.git

# 将文件提交到远程仓库

## 设置用户名和邮箱
[root@alvin-test-os data]# git config --global user.name "Alvin"
[root@alvin-test-os data]# git config --global user.email "chenyangqit@163.com"

[root@alvin-test-os data]# git config --get user.name
Alvin
[root@alvin-test-os data]# git config --get user.email
chenyangqit@163.com

# 将本地代码提交到远程仓库
[root@alvin-test-os data]# git push -u origin master
Username for 'https://gitee.com': xxx
Password for 'https://15517127859@gitee.com': 
Counting objects: 3, done.
Writing objects: 100% (3/3), 209 bytes | 0 bytes/s, done.
Total 3 (delta 0), reused 0 (delta 0)
remote: Powered by GITEE.COM [GNK-5.0]
To https://gitee.com/chenyang0910/data.git
 * [new branch]      master -> master
Branch master set up to track remote branch master from origin.

# 使用SSh将我们的代码提交到远程
[root@alvin-test-os data]# vim .git/config 
    url = git@gitee.com:chenyang0910/data.git 

[root@alvin-test-os data]# git push -u origin master
Warning: Permanently added 'gitee.com,212.64.62.183' (ECDSA) to the list of known hosts.
Counting objects: 5, done.
Writing objects: 100% (3/3), 241 bytes | 0 bytes/s, done.
Total 3 (delta 0), reused 0 (delta 0)
remote: Powered by GITEE.COM [GNK-5.0]
To git@gitee.com:chenyang0910/data.git
   63004c0..dd584e3  master -> master
Branch master set up to track remote branch master from origin.

# 拉取远程仓库代码
[root@alvin-test-os data]# git pull
remote: Enumerating objects: 5, done.
remote: Counting objects: 100% (5/5), done.
remote: Compressing objects: 100% (2/2), done.
Unpacking objects: 100% (3/3), done.
remote: Total 3 (delta 0), reused 0 (delta 0), pack-reused 0
From gitee.com:chenyang0910/data
   dd584e3..78fd071  master     -> origin/master
Updating dd584e3..78fd071
Fast-forward
 index.html | 6 ++++++
 1 file changed, 6 insertions(+)

# git pull 和 git clone 之间有什么不同?

git push 

7、分支

# 房子 ---> 房间
# git仓库  ---> 代表是房间

不同的分支之间是相对隔离的。

代码仓库:oldboy

开发环境
测试环境
预发布环境
线上环境

# 查看本地分支
[root@alvin-test-os data]# git branch 
* master

# 查看远程分支    
[root@alvin-test-os data]# git branch -a
* master
  remotes/origin/HEAD -> origin/master
  remotes/origin/master

# 创建分支
[root@alvin-test-os data]# git checkout -b test
Switched to a new branch 'test'
[root@alvin-test-os data]# git branch 
  master
* test

# 将本地分支同步到远程分支
[root@alvin-test-os data]# git branch 
  master
* test
[root@alvin-test-os data]# git push -u origin test
Total 0 (delta 0), reused 0 (delta 0)
remote: Powered by GITEE.COM [GNK-5.0]
remote: Create a pull request for 'test' on Gitee by visiting:
remote:     https://gitee.com/chenyang0910/data/pull/new/chenyang0910:test...chenyang0910:master
To git@gitee.com:chenyang0910/data.git
 * [new branch]      test -> test
Branch test set up to track remote branch test from origin.

# 切换分支
[root@alvin-test-os data]# git checkout  master
Switched to branch 'master'
[root@alvin-test-os data]# git branch 
* master
  test

# 标签:标签是一种特殊的分支,特点是只能够删除不能够修改

## 创建标签: 本地创建    远程仓库创建
git tag -a [tag名称] -m "解释"

## 拉取指定标签内容
git clone -b [标签名称|分支名称] [仓库地址]

创建本地仓库,关联上远程仓库,并实现脚本通过标签部署discuz

1、创建本地仓库
[root@kubernetes-master-01 mnt]# mkdir data
[root@kubernetes-master-01 mnt]# cd data/
[root@kubernetes-master-01 data]# git init
Initialized empty Git repository in /mnt/data/.git/
[root@kubernetes-master-01 data]# 

2、关联上远程仓库
    1、创建远程仓库
    2、将本地仓库关联上远程仓库
[root@kubernetes-master-01 data]# cat .git/config 
[core]
    repositoryformatversion = 0
    filemode = true
    bare = false
    logallrefupdates = true
[root@kubernetes-master-01 data]# git remote add origin git@gitee.com:chenyang0910/discuz.git
[root@kubernetes-master-01 data]# cat .git/config 
[core]
    repositoryformatversion = 0
    filemode = true
    bare = false
    logallrefupdates = true
[remote "origin"]
    url = git@gitee.com:chenyang0910/discuz.git
    fetch = +refs/heads/*:refs/remotes/origin/*

3、实现脚本通过标签部署discuz
    1、准备代码
    2、提交的git暂存区
    3、配置用户名和邮箱
    4、提交
    5、推送(认证)
    6、创建标签
    7、编写部署脚本

[root@kubernetes-master-01 mnt]# cat deploy.sh 
#!/bin/bash
# Deploy discuz from a git tag/branch: clone the tagged code into a
# timestamped release directory and point the nginx web root at it.
#
# Usage: ./deploy.sh <tag-or-branch>
set -euo pipefail

# Fail fast with a usage message instead of cloning the default branch
# (and destroying the web root) when no tag is given.
RELEASE_TAG=${1:?usage: $0 <tag-or-branch>}

REPOSITORY_URL="git@gitee.com:chenyang0910/discuz.git"
RELEASE_DIR="discuz-$(date +%F_%H:%M:%S)"
CURRENT_DIR=$(pwd)
WEB_ROOT="/usr/share/nginx/html"

# Clone only the requested tag/branch into the release directory;
# set -e aborts before the web root is touched if the clone fails.
git clone -b "${RELEASE_TAG}" "${REPOSITORY_URL}" "${RELEASE_DIR}"

# Replace the web root (real directory on first run, symlink afterwards)
# with a symlink to the freshly cloned release.
rm -rf -- "${WEB_ROOT}"
ln -s "${CURRENT_DIR}/${RELEASE_DIR}" "${WEB_ROOT}"

Ingress Nginx

1、HeadLess Service

HeadLessService实际上是属于ClusterIP

2、Service在创建之前是可以自定义IP的

3、Service跟Pod之间关系

service ---> endpoints  ---> pod

4、Ingress

nginx ingress  :  性能强
traefik :原生支持k8s
istio : 服务网格,服务流量的治理

5、Ingress Nginx

1、创建HTTPS证书
openssl genrsa -out tls.key 2048
openssl req -new -x509 -key tls.key -out tls.crt -subj /C=CN/ST=ShangHai/L=ShangHai/O=Ingress/CN=www.test.com

2、部署证书
kubectl -n wordpress create secret tls ingress-tls --cert=tls.crt --key=tls.key

3、创建Ingress
# HTTPS Ingress for the wordpress Service: TLS is terminated with the
# "ingress-tls" Secret created in the previous step.
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress
  annotations:
    kubernetes.io/ingress.class: "nginx"  # handled by the nginx ingress controller
spec:
  tls:
    - secretName: ingress-tls  # name of the TLS Secret (tls.crt / tls.key)
  rules:
    - host: www.test.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress
              servicePort: 80

4、查看ingress暴露的443端口
[root@k8s-m-01 ~]# kubectl get svc -n ingress-nginx 
NAME                                 TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx-controller             NodePort    10.96.25.245   <none>        80:52109/TCP,443:45419/TCP   30m

5、浏览器访问
https://www.test.com:45419/
# 案例
# Full example: MySQL (own namespace) + WordPress, exposed via the
# nginx ingress controller with TLS.
apiVersion: v1
kind: Namespace
metadata:
  name: mysql
---
# ClusterIP Service fronting the MySQL Deployment (selector app=mysql).
kind: Service
apiVersion: v1
metadata:
  name: mysql
  namespace: mysql
spec:
  ports:
    - name: http
      port: 3306
      targetPort: 3306
  selector:
    app: mysql
---
# MySQL 5.7 Deployment; root password injected via environment variable.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: name-mysql
  namespace: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: mysql:5.7.33
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"  # NOTE(review): plain-text password; a Secret is preferable

---
# WordPress Deployment in the default namespace: php and nginx run as
# two containers in the same Pod.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: wordpress
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wordpress
  template:
    metadata:
      labels:
        app: wordpress
    spec:
      containers:
        - name: php
          image: alvinos/php:wordpress-v2
        - name: nginx
          image: alvinos/nginx:wordpress-v2

---
# Service fronting the WordPress Pods (selector app=wordpress).
apiVersion: v1
kind: Service
metadata:
  name: wordpress
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: wordpress
---
# Ingress terminating TLS with the "ingress-tls" Secret and routing
# www.test.com to the wordpress Service.
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  tls:
    - secretName: ingress-tls
  rules:
    - host: www.test.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress
              servicePort: 80

1、ingress种类

1、Nginx Ingress

2、treafik 

3、服务网格:istio

2、安装Ingress

# 下载Ingress Nginx配置清单
[root@k8s-m-01 ~]# wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.44.0/deploy/static/provider/baremetal/deploy.yaml

# 修改镜像
[root@k8s-m-01 ~]# sed -i 's#k8s.gcr.io/ingress-nginx/controller:v0.44.0@sha256:3dd0fac48073beaca2d67a78c746c7593f9c575168a17139a9955a82c63c4b9a#registry.cn-hangzhou.aliyuncs.com/k8sos/ingress-controller:v0.44.0#g' deploy.yaml

# 开始部署
[root@k8s-m-01 ~]# kubectl apply -f deploy.yaml

# 检查
[root@k8s-m-01 ~]# kubectl get pods -n ingress-nginx 
NAME                                        READY   STATUS      RESTARTS   AGE
ingress-nginx-admission-create-g9brk        0/1     Completed   0          3d22h
ingress-nginx-admission-patch-tzlgf         0/1     Completed   0          3d22h
ingress-nginx-controller-8494fd5b55-wpf9g   1/1     Running     0          3d22h

3、测试http

1、部署服务(Deployment + Service)

2、编写ingress配置清单(见下文)
  • 配置清单
pod+service配置
# Backend for the HTTP ingress test: a plain nginx Deployment plus a
# ClusterIP Service ("wordpress-nginx") that the Ingress below targets.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ingress-test
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx
---
# Service selecting the Pods above (label app=nginx) on port 80.
kind: Service
apiVersion: v1
metadata:
  name: wordpress-nginx
spec:
  ports:
    - name: http
      port: 80
      targetPort: 80
  selector:
    app: nginx

ingress nginx 配置

# Plain-HTTP Ingress: routes www.test-nginx.com to the wordpress-nginx
# Service defined above.
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress-nginx
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
    - host: www.test-nginx.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress-nginx
              servicePort: 80

4、测试https

1、创建证书
[root@k8s-m-01 ~]# openssl genrsa -out tls.key 2048
[root@k8s-m-01 ~]# openssl req -new -x509 -key tls.key -out tls.crt -subj /C=CN/ST=ShangHai/L=ShangHai/O=Ingress/CN=www.test-nginx.com

2、部署证书
[root@k8s-m-01 ~]# kubectl -n default create secret tls ingress-tls --cert=tls.crt --key=tls.key

3、编写ingress配置清单(见下文)

4、部署并测试
[root@k8s-m-01 ~]# curl -k https://www.test-nginx.com:44490/
  • 配置清单

```bash

# HTTPS Ingress test: TLS for www.test-nginx.com via the "ingress-tls"
# Secret, routing to the wordpress-nginx Service.
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress-nginx-tls
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  tls:
    - hosts: 
        - www.test-nginx.com
      secretName: ingress-tls  # Secret created from tls.crt / tls.key
  rules:
    - host: www.test-nginx.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress-nginx
              servicePort: 80

5、nginx ingress常用语法

https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#service-upstream

# 域名重定向(不能重定向 / )
nginx.ingress.kubernetes.io/rewrite-target

# Example: rewrite-target annotation — requests are rewritten to the
# given URL by the nginx ingress controller.
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress-nginx-tls
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/rewrite-target: https://www.baidu.com/s?wd=nginx
spec:
  rules:
    - host: www.test-nginx.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress-nginx
              servicePort: 80

# 设置ingress白名单
# Example: client IP whitelist — only the listed source addresses may
# access this Ingress.
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress-nginx-tls
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/whitelist-source-range: 192.168.15.53,192.168.15.52
spec:
  rules:
    - host: www.test-nginx.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress-nginx
              servicePort: 80

# 域名重定向
# Example: permanent-redirect annotation — issues a permanent redirect
# to the given URL instead of proxying to the backend.
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress-nginx-tls
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/permanent-redirect: https://www.baidu.com
spec:
  rules:
    - host: www.test-nginx.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress-nginx
              servicePort: 80

# 使用正则的方式匹配(支持的正则比较少)
# Example: regex path capture — the group captured by /search/(.+) is
# substituted for $1 in the rewrite-target URL.
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress-nginx-tls
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/rewrite-target: https://www.baidu.com/s?wd=$1
spec:
  rules:
    - host: www.test-nginx.com
      http:
        paths:
          - path: /search/(.+)
            backend:
              serviceName: wordpress-nginx
              servicePort: 80

# nginx登录
https://kubernetes.github.io/ingress-nginx/examples/auth/basic/

# Example: HTTP basic auth — credentials come from the "basic-auth"
# Secret (htpasswd format) referenced by the auth-secret annotation.
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-ingress-nginx-tls
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/auth-type: basic
    nginx.ingress.kubernetes.io/auth-secret: basic-auth
    # nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - foo'
spec:
  rules:
    - host: www.test-nginx.com
      http:
        paths:
          - path: /
            backend:
              serviceName: wordpress-nginx
              servicePort: 80

6、设置nginx常用用法的时候

有两种方式:
    1、注解        : 当前ingress生效
    2、configMap  : 全局ingress生效