K8S Training

All basic k8s operations below are performed on a cluster installed via Rancher.

1. Labels and Annotations

1.1 Labels

# View a resource's labels
kubectl get node --show-labels 

# Set a label on a node, e.g. demo=test; a label may have an empty value, e.g. demo=
kubectl label nodes node01 demo=test

# View node01 details in YAML format
kubectl get nodes node01 -o yaml

# Remove the label from the node (the trailing minus sign deletes the label)
kubectl label nodes node01 demo-

1.2 Annotations

# View annotations
# kubectl describe resource_type resource_name
kubectl describe nodes node01

# Add an annotation
# kubectl annotate resource_type resource_name key=value
# Annotate node01 with key demo and value test
kubectl annotate nodes node01 demo=test
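# A single annotation can also be read directly with jsonpath instead of describe
# (a sketch; the demo key is the one set above)
kubectl get nodes node01 -o jsonpath='{.metadata.annotations.demo}'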

# Remove the annotation (the trailing minus sign deletes it)
kubectl annotate nodes node01 demo-

2. Namespaces

2.1 Namespaces

# List the namespaces in the current cluster
kubectl get namespaces 

2.2 Command line

# Create a namespace named test
kubectl create namespace test

# Delete the test namespace
kubectl delete namespaces test 

2.3 YAML

# Create a directory for the YAML files
mkdir yaml
cd yaml

# Write the YAML
cat << EOF > my-namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: test
EOF

# Create the namespace from the YAML file
kubectl create -f my-namespace.yaml

# View the created namespace
kubectl get namespaces 

# Delete the namespace via the YAML file
kubectl delete -f my-namespace.yaml 

2.4 Viewing resources

# Resources that live in a namespace
kubectl api-resources --namespaced=true

# Resources that are not namespaced
kubectl api-resources --namespaced=false

3. Pods

3.1 Creating a Pod

# Write the YAML
cat << EOF > pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: memory-demo
spec:
  containers:
  - name: demo
    image: polinux/stress
    command: ["stress"]
    args: ["--vm", "1", "--vm-bytes", "150M", "--vm-hang", "1"]
EOF
# Create the pod from the YAML
kubectl create -f pod.yaml
# List pods
kubectl get pods

# NAME: pod name
# READY: ready containers / total containers
# STATUS: pod status
# RESTARTS: container restart count
# AGE: age of the resource
# In kubernetes, the image pull policy (imagePullPolicy) can be:
#  Always: pull the image every time (the default when the tag is :latest or omitted)
#  Never: use only a local image, never pull
#  IfNotPresent: pull only when the image is absent locally (the default for other tags)

# Delete the pod
kubectl delete -f pod.yaml

3.2 Specifying the image pull policy in the YAML file

cat << EOF > pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: memory-demo
spec:
  containers:
  - name: demo
    image: polinux/stress
    imagePullPolicy: IfNotPresent
    command: ["stress"]
    args: ["--vm", "1", "--vm-bytes", "150M", "--vm-hang", "1"]
EOF

# Create the pod from the YAML (apply creates when absent and updates when present; create only creates)
kubectl apply -f pod.yaml 
# View pod details
kubectl get pods memory-demo -o yaml
# Delete the pod
kubectl delete -f pod.yaml   

3.3 A pod can contain multiple containers

# Write the YAML
# Container demo2 will fail to start because it has no command
cat << EOF > mu-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: memory-demo
spec:
  containers:
  - name: demo1
    image: polinux/stress
    imagePullPolicy: IfNotPresent
    command: ["stress"]
    args: ["--vm", "1", "--vm-bytes", "150M", "--vm-hang", "1"]
  - name: demo2
    image: polinux/stress
    imagePullPolicy: IfNotPresent
    args: ["--vm", "1", "--vm-bytes", "150M", "--vm-hang", "1"]
EOF

# Create the pod from the YAML
kubectl create -f mu-pod.yaml
# The second container, demo2, will fail to start
# kubectl describe shows pod details; Events lists the pod's creation log
kubectl describe pods memory-demo
# vi mu-pod.yaml and add a line to demo2: command: ["stress"]
# First delete the pod
kubectl delete -f mu-pod.yaml
# Edit mu-pod.yaml
vi mu-pod.yaml
# Then recreate it
kubectl create -f mu-pod.yaml

3.4 Working inside containers

# A pod wrapping a single container
# -n selects the namespace, -it runs interactively, -c selects the container
# kubectl -n mem-example exec -it memory-demo bash
# kubectl -n mem-example exec -it memory-demo -- ls /root

# Enter the first container (with multiple containers, the first is the default)
kubectl exec -it memory-demo  bash
exit
# Run ls etc in the first container
kubectl exec -it memory-demo -- ls etc

# A pod wrapping multiple containers
# kubectl -n mem-example exec -it -c memory-demo-ctr-2 memory-demo bash
# kubectl -n mem-example exec -it -c memory-demo-ctr-2 memory-demo -- ls /root
# Enter the demo1 container (-c selects the container)
kubectl exec -it -c demo1 memory-demo  bash
# Run ls etc in the demo2 container
kubectl exec -it -c demo2 memory-demo -- ls etc

3.5 Using apply, edit, and patch

# Write the YAML
cat << EOF > pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test
  labels:
      app: test
spec:
  containers:
  - name: demo
    imagePullPolicy: IfNotPresent
    image: polinux/stress
    command: ["stress"]
    args: ["--vm", "1", "--vm-bytes", "150M", "--vm-hang", "1"]
EOF

# apply applies the local YAML; use apply to push configuration updates
# Label updates made via apply are recorded in the last-applied-configuration annotation; labels changed directly with edit or patch are not
kubectl apply -f pod.yaml 

# View the pod's labels
kubectl get pod --show-labels 

# edit (modify the live pod YAML directly, e.g. to add a label)
kubectl edit pod test

# patch (modify pod fields such as labels via JSON)
kubectl get pod test -o json
kubectl patch pod test -p '{"metadata": {"labels": {"app": "test222"}}}'
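Besides the default strategic merge patch, kubectl also supports JSON patches; a minimal sketch (container index 0 refers to the demo container defined above, whose image is one of the few pod fields that may be changed in place):

# Replace the image of the first container via a JSON patch
kubectl patch pod test --type='json' -p='[{"op": "replace", "path": "/spec/containers/0/image", "value": "polinux/stress:latest"}]'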

3.6 Cleaning up

# Delete the resources created from the YAML files in the current directory
kubectl delete -f .

4. Init Containers

initContainers run first; only after they complete successfully do the other containers start.

4.1 A single init container

# Write the YAML
cat << EOF > initcontainers.yaml
apiVersion: v1
kind: Pod
metadata:
  name: init-test
  labels:
    app: myapp
spec:
  containers:
  - name: container
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', 'echo The app is running! && sleep 3600']
  initContainers:
  - name: init-test
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', 'echo init containers test && sleep 10']
EOF

# Apply the YAML file
kubectl apply -f initcontainers.yaml

4.2 Multiple init containers

# Multiple init containers run in order; only after all of them finish do the other containers start
cat << EOF > mu-initcontainers.yaml
apiVersion: v1
kind: Pod
metadata:
  name: init-test-mu
  labels:
    app: myapp
spec:
  containers:
  - name: container01
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', 'echo The app container01 is running! && sleep 60000']
  - name: container02
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', 'echo The app container02 is running! && sleep 60000']
  initContainers:
  - name: init-test01
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', 'echo init containers test01 && sleep 10']
  - name: init-test02
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', 'echo init containers test02 && sleep 10']
EOF

# Apply the YAML file
kubectl apply -f mu-initcontainers.yaml

# Open another terminal to monitor progress
# Watch via --watch (refreshes only on changes)
kubectl get pods --watch
# -n1 refreshes every second
watch -n1 'kubectl get pods'

4.3 Cleaning up

# Delete the resources created from the YAML files in the current directory
kubectl delete -f .

5. Controllers

5.1 ReplicaSet

# Create a directory
mkdir controll
cd controll
# Create a ReplicaSet
cat << EOF > replicaset.yaml
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: nginx
  labels:
    app: rs-nginx
spec:
  # modify replicas according to your case
  replicas: 2
  selector:
    matchLabels:
      app: pod-nginx
  template:
    metadata:
      labels:
        app: pod-nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: IfNotPresent
EOF

# Create from the YAML file
kubectl create -f replicaset.yaml
# View ReplicaSet resources
kubectl get replicasets.apps
# NAME: ReplicaSet name
# DESIRED: requested replica count
# CURRENT: replicas actually running
# READY: replicas that are Ready
# AGE: age of the resource
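The pods this ReplicaSet manages can be listed via its selector label from the YAML above (a quick check):

# List the ReplicaSet's pods by label
kubectl get pods -l app=pod-nginx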

5.1.1 Scaling a ReplicaSet

# Method 1: change replicas: 3 in the local YAML file
vim replicaset.yaml
kubectl apply -f replicaset.yaml
# Method 2: edit the YAML of the running resource: kubectl edit replicasets.apps <replicaset_name>
kubectl edit replicasets.apps nginx 
# Method 3: from the command line: kubectl scale replicasets.apps --replicas=1 <replicaset_name>
kubectl scale replicasets.apps --replicas=1 nginx 
# Delete the ReplicaSet
kubectl delete -f replicaset.yaml

5.2 Deployment

# Method 1: create from the command line
kubectl run nginx-app --image=nginx:1.9.0 --image-pull-policy=IfNotPresent --replicas=2
# Delete nginx-app
kubectl delete deployments.apps nginx-app

# Method 2: create from YAML
cat << EOF > nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx-deploy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-pod
  template:
    metadata:
      labels:
        app: nginx-pod
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.6
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
EOF
# Create from the YAML file
kubectl create -f nginx-deployment.yaml

# View the created Deployment
kubectl get deployments.apps
# View nginx-deployment details
kubectl describe deployments.apps nginx-deployment 
# READY: pods that are Ready
# UP-TO-DATE: pods updated to the latest template
# AVAILABLE: pods available
# AGE: age of the resource

5.2.1 Upgrading the image

# Method 1: change the local YAML (e.g. to nginx:1.14.2) and upgrade with apply

# Method 2: use edit to change the running Deployment's container image in place
kubectl edit deployments.apps nginx-deployment 
# Method 3: upgrade from the command line and record the change:
kubectl set image deployments.apps nginx-deployment nginx=nginx:latest --record 
# Check the deployment's rollout status
kubectl rollout status deployment nginx-deployment
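On newer kubectl releases --record is deprecated; the same rollout-history entry can be written by hand through the kubernetes.io/change-cause annotation (a sketch):

# Record a change cause without --record
kubectl annotate deployments.apps nginx-deployment kubernetes.io/change-cause="upgrade nginx to latest"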

5.2.2 Rolling back

# View the rollout history
kubectl rollout history deployment nginx-deployment   

# View the details of one revision
kubectl rollout history deployment nginx-deployment --revision=1

# Roll back
kubectl rollout undo deployment nginx-deployment --to-revision=1
# After pause, the Deployment can no longer be updated or rolled back
kubectl rollout pause deployment nginx-deployment 
# resume lifts the pause:
kubectl rollout resume deployment nginx-deployment 

5.3 DaemonSet

cat << EOF > daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-elasticsearch
  labels:
    k8s-app: fluentd-logging
spec:
  selector:
    matchLabels:
      name: fluentd-elasticsearch
  template:
    metadata:
      labels:
        name: fluentd-elasticsearch
    spec:
      containers:
      - name: fluentd-elasticsearch
        image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
        imagePullPolicy: IfNotPresent
EOF
# Create from the YAML file
kubectl create -f daemonset.yaml

# View the DaemonSet
kubectl get daemonset
# DESIRED: requested pod count
# CURRENT: pods actually running
# READY: pods that are Ready
# UP-TO-DATE: pods updated to the latest template
# AVAILABLE: pods available
# NODE SELECTOR: node selector
# AGE: age of the resource
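By default a DaemonSet places one pod on every schedulable node; to restrict it to a subset, a nodeSelector can be added under the pod template (a sketch, assuming some nodes carry the label disktype=ssd):

# Fragment to add under spec.template.spec in daemonset.yaml:
#   nodeSelector:
#     disktype: ssd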

5.4 StatefulSet

cat << EOF > statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: nginx # has to match .spec.template.metadata.labels
  serviceName: "nginx"  # $(podname).(headless server name).namespace.svc.cluster.local
  replicas: 3 # by default is 1
  template:
    metadata:
      labels:
        app: nginx # has to match .spec.selector.matchLabels
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
          name: web
EOF

# Create from the YAML file
kubectl create -f statefulset.yaml 

# View the StatefulSet
kubectl get statefulsets.apps
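StatefulSet pods get stable, ordinal names; listing by the template label should show web-0, web-1, and web-2, created in order (a quick check):

# List the StatefulSet's pods
kubectl get pods -l app=nginx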

5.5 Job and CronJob

5.5.1 Job

cat << EOF > job.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  completions: 10
  parallelism: 2
  template:
    spec:
      containers:
      - name: pi
        image: perl
        imagePullPolicy: IfNotPresent
        command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
  backoffLimit: 4
EOF

# backoffLimit: the number of retries before the Job is marked as failed
# Create from the YAML file
kubectl create -f job.yaml

# View Job resources
# NAME: Job name
# COMPLETIONS: completed count / required total
# DURATION: how long the Job has been running
# AGE: age of the resource
  • spec.template has the same schema as a Pod
  • RestartPolicy supports only Never or OnFailure
  • With a single Pod, the Job finishes by default once that Pod completes successfully
  • .spec.completions is the number of successfully finished Pods required to complete the Job; default 1
  • .spec.parallelism is the number of Pods running in parallel; default 1
  • spec.activeDeadlineSeconds caps how long failed Pods keep being retried; past the deadline no further retries happen
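The computed digits of pi land in the pod logs and can be fetched via the job-name label that the Job controller adds to its pods automatically (a sketch):

# Show the last log line of each pi pod
kubectl logs -l job-name=pi --tail=1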

5.5.2 CronJob

cat << EOF > cronjob.yaml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            imagePullPolicy: IfNotPresent
            args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure
EOF

# Create from the YAML file
kubectl create -f cronjob.yaml

# View the CronJob
# SCHEDULE: the cron schedule
# SUSPEND: whether the CronJob is paused
# ACTIVE: number of currently running jobs
# LAST SCHEDULE: when it last ran
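A CronJob can be paused and resumed without deleting it by patching spec.suspend (a sketch):

# Suspend the CronJob
kubectl patch cronjobs hello -p '{"spec":{"suspend":true}}'
# Resume it
kubectl patch cronjobs hello -p '{"spec":{"suspend":false}}'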

5.6 Cleaning up

# Delete the resources created from the YAML files in the current directory
kubectl delete -f .

6. Service Discovery (Service)

6.1 Service

6.1.1 Creating a ClusterIP service

# Method 1: create from the command line (target-port: container port; port: service port)
# kubectl expose deployment <nginx-name> --target-port=80 --port=80 --type=ClusterIP
kubectl run --image=nginx:1.9.0 --image-pull-policy=IfNotPresent --labels=app=nginx-pod nginx-deployment
kubectl expose deployment nginx-deployment --target-port=80 --port=18080 --type=ClusterIP

kubectl delete service nginx-deployment 

mkdir service
cd service

# Method 2: create from a YAML file
cat << EOF > clusterip.yaml
kind: Service
apiVersion: v1
metadata:
  name: my-service
spec:
  type: ClusterIP                                               
  selector:
    app: nginx-pod
  ports:
  - protocol: TCP
    port: 18080
    targetPort: 80
EOF

# Create from the YAML file
kubectl create -f clusterip.yaml

# View services
kubectl get service
# curl <CLUSTER-IP>:18080 to check whether the nginx welcome page is served
# Clean up
kubectl delete deployment nginx-deployment
kubectl delete -f clusterip.yaml

6.1.2 Creating a NodePort service

# Method 1: create from the command line
# kubectl expose deployment <nginx-name> --target-port=80 --port=80 --type=NodePort
kubectl run --image=nginx:1.9.0 --image-pull-policy=IfNotPresent --labels=app=nginx-pod nginx-deployment
kubectl expose deployment nginx-deployment --target-port=80 --port=18081 --type=NodePort
kubectl delete service nginx-deployment 

# Method 2: create from a YAML file
cat << EOF > nodeport.yaml
kind: Service
apiVersion: v1
metadata:
  name: my-nodeport-service
spec:
  type: NodePort                                               
  selector:
    app: nginx-pod
  ports:
  - protocol: TCP
    port: 18081
    targetPort: 80
EOF
# Create from the YAML file
kubectl create -f nodeport.yaml

# View services
kubectl get service
# Access via a node IP plus the assigned NodePort
# curl <node01/node02>:30000
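The assigned NodePort (allocated from the 30000-32767 range by default) can be read back with jsonpath instead of eyeballing the get output (a sketch):

# Print the allocated NodePort
kubectl get service my-nodeport-service -o jsonpath='{.spec.ports[0].nodePort}'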

# Clean up
kubectl delete deployment nginx-deployment
kubectl delete -f nodeport.yaml

6.1.3 Headless Services

cat << EOF > headless-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: nginx # has to match .spec.template.metadata.labels
  serviceName: "nginx"
  replicas: 3 # by default is 1
  template:
    metadata:
      labels:
        app: nginx # has to match .spec.selector.matchLabels
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
          name: web
EOF
# Create from the YAML file
kubectl create -f headless-service.yaml

# The CLUSTER-IP is None
# View services
kubectl get service

# Run a busybox from the command line to test DNS resolution
kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 --image-pull-policy=IfNotPresent sh
nslookup nginx
ping web-0.nginx

6.1.4 ExternalName

cat << EOF > external-name.yaml
apiVersion: v1
kind: Service
metadata:
  name: rancher
spec:
  type: ExternalName
  externalName: www.rancher.cn
EOF
# Create from the YAML file
kubectl create -f external-name.yaml
# View services
kubectl get service
# Test resolution with busybox:1.28, as sketched below
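A sketch of that test; the answer should be a CNAME pointing at www.rancher.cn:

# Resolve the ExternalName service from inside the cluster
kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 --image-pull-policy=IfNotPresent -- nslookup rancher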

6.1.5 External IPs

# Run an nginx deployment from the command line
kubectl run --image=nginx:1.9.0 --image-pull-policy=IfNotPresent --labels=app=nginx nginx-deployment

# Expose port 18080 of the nginx deployment via externalIPs
cat << EOF > external-ip.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service
spec:
  selector:
    app: nginx
  ports:
    - name: http
      protocol: TCP
      port: 18080
      targetPort: 80
  externalIPs:
    - 172.24.228.171
EOF
# Create from the YAML file
kubectl create -f external-ip.yaml
# View services
kubectl get service
# Access via the external IP plus the port, e.g. curl 172.24.228.171:18080

6.2 Service discovery

# Run a busybox from the command line to test DNS resolution
kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 --image-pull-policy=IfNotPresent sh
nslookup nginx

# Create a busybox via YAML and test DNS resolution with nslookup [service-name]
cat << EOF > busybox.yaml
apiVersion: v1
kind: Pod
metadata:
    name: busybox1
    labels:
      name: busybox
spec:
    hostname: busybox-1
    subdomain: default-subdomain
    containers:
    - image: busybox:1.28
      imagePullPolicy: IfNotPresent
      command:
        - sleep
        - "3600"
      name: busybox
EOF
# Create from the YAML file
kubectl create -f busybox.yaml
kubectl exec -it busybox1 sh

# Use nslookup [service-name] to test DNS resolution
# Every container gets the kube-dns IP in its resolv.conf, and the pod's own cluster IP is written into its hosts file
# hostAliases: host aliases
cat << EOF > hostaliases.yaml
apiVersion: v1
kind: Pod
metadata:
  name: hostaliases-pod
spec:
  restartPolicy: Never
  hostAliases:
  - ip: "127.0.0.1"
    hostnames:
    - "foo.local"
    - "bar.local"
  - ip: "10.1.2.3"
    hostnames:
    - "foo.remote"
    - "bar.remote"
  containers:
  - name: cat-hosts
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command:
    - cat
    args:
    - "/etc/hosts"
EOF
# Create from the YAML file
kubectl create -f hostaliases.yaml

# View the logs
kubectl logs hostaliases-pod

6.3 Ingress

6.3.1 Create two deployments and expose them

# Run two deployments
kubectl run --image=nginx --image-pull-policy=IfNotPresent  --labels=run=server1 server1
kubectl run --image=nginx --image-pull-policy=IfNotPresent  --labels=run=server2 server2

# Expose the services with kubectl expose <exposure method 1>
kubectl expose deployment server1 --name=service1 --port=4200 --target-port=80
kubectl expose deployment server2 --name=service2 --port=8080 --target-port=80

# Enter the server1 container (press Tab after server1- to complete the name)
# kubectl exec -it server1-686b568985-5wlpn bash
kubectl exec -it server1- bash
cd /usr/share/nginx/html
echo 'this is server1' > index.html

# Enter the server2 container (press Tab after server2- to complete the name)
# kubectl exec -it server2-8b7967d47-9jn4k bash
kubectl exec -it server2- bash
cd /usr/share/nginx/html
echo 'this is server2' > index.html


# Expose the services via YAML <exposure method 2>
cat << EOF > ingress-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: service1
spec:
  selector:
    run: server1
  ports:
    - protocol: TCP
      port: 4200
      targetPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: service2
spec:
  selector:
    run: server2
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 80
EOF
# Create from the YAML file
kubectl create -f ingress-service.yaml

6.3.2 Creating the Ingress

cat << EOF > ingress.yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: simple-fanout-example
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - host: foo.bar.com
    http:
      paths:
      - path: /foo
        backend:
          serviceName: service1
          servicePort: 4200
      - path: /bar
        backend:
          serviceName: service2
          servicePort: 8080
EOF
# Create from the YAML file
kubectl create -f ingress.yaml
# View the ingress (the IPs under ADDRESS are the nodes where nginx-ingress is installed)
kubectl get ingress

# Add node01's IP plus the domain to the hosts file
vim /etc/hosts 
172.31.53.73    node01 foo.bar.com

# For testing from a local machine, map node01's public IP to the domain in your hosts file
# 47.113.202.161 foo.bar.com

# Test the ingress
curl foo.bar.com/foo
curl foo.bar.com/bar

6.4 Session affinity

# Run a deployment with three replicas
kubectl run --image=nginx:1.9.0 --image-pull-policy=IfNotPresent --labels=app=nginx --replicas=3 nginx-deployment
# Enter each container and modify index.html
# Enter an nginx-deployment container (press Tab after nginx-deployment- to complete the name)
# kubectl exec -it nginx-deployment-79bbfbc7c-gdlxk bash
kubectl exec -it nginx-deployment-  bash
# First container
echo 111 > /usr/share/nginx/html/index.html
# Second container
echo 222 > /usr/share/nginx/html/index.html
# Third container
echo 333 > /usr/share/nginx/html/index.html

# Set the sessionAffinity parameter to ClientIP and add the cookie annotations.
cat << EOF > session-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: session-service
  annotations:
    nginx.ingress.kubernetes.io/affinity: cookie
    nginx.ingress.kubernetes.io/session-cookie-name: route
    nginx.ingress.kubernetes.io/session-cookie-hash: sha1
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  selector:
    app: nginx
  ports:
    - protocol: TCP
      port: 4200
      targetPort: 80
  sessionAffinity: ClientIP
EOF
# Create from the YAML file
kubectl create -f session-service.yaml

# Without sessionAffinity: ClientIP
# [root@master01 service]# curl 10.43.11.84:4200
# 333
# [root@master01 service]# curl 10.43.11.84:4200
# 111
# [root@master01 service]# curl 10.43.11.84:4200
# 111
# [root@master01 service]# curl 10.43.11.84:4200
# 222

# With sessionAffinity: ClientIP
# [root@master01 service]# curl 10.43.11.84:4200
# 333
# [root@master01 service]# curl 10.43.11.84:4200
# 333

6.5 Cleaning up

# Delete the resources created from the YAML files in the current directory
kubectl delete -f .

kubectl delete deployments.apps nginx-deployment 

7. Health Checks

7.1 Liveness Probe

7.1.1 Liveness command

# exec probe: check health by running a command
cat << EOF > pod-liveness-command.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-exec
spec:
  containers:
  - name: liveness
    image: radial/busyboxplus
    imagePullPolicy: IfNotPresent
    args:
    - /bin/sh
    - -c
    - touch /tmp/healthy; sleep 20; rm -rf /tmp/healthy; sleep 600
    livenessProbe:
      exec:
        command:
        - cat
        - /tmp/healthy
      initialDelaySeconds: 5
      periodSeconds: 5
EOF
# Create from the YAML file
kubectl create -f pod-liveness-command.yaml
# Look at the files under /tmp in the container
kubectl exec -it liveness-exec -- ls tmp
# Watch the pods, refreshing every second
# RESTARTS of liveness-exec keeps climbing after ~30 seconds because the probe reports unhealthy
watch -n1 'kubectl get pod -o wide '
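The probe failures also show up in the pod's events (a sketch):

# The Events section should contain Unhealthy / "Liveness probe failed" entries
kubectl describe pod liveness-exec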

7.1.2 Liveness HTTP request

# HTTP liveness check
cat << EOF > pod-liveness-http.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-http
spec:
  containers:
  - name: liveness
    image: seedoflife/liveness
    imagePullPolicy: IfNotPresent
    args:
    - /server
    livenessProbe:
      httpGet:
        path: /healthz
        port: 8080
        httpHeaders:
        - name: X-Custom-Header
          value: Awesome
      initialDelaySeconds: 3
      periodSeconds: 3
EOF

# Apply the YAML file
kubectl apply -f pod-liveness-http.yaml 
# View pod info
kubectl get pods -o wide
# curl the pod IP plus the port; a healthy endpoint returns 200
curl -v 10.42.4.56:8080/healthz

#  < HTTP/1.1 200 OK
#  < Date: Fri, 17 Apr 2020 12:54:43 GMT

# When the handler starts failing:
#  < HTTP/1.1 500 Internal Server Error    

7.1.3 TCP liveness probe

# TCP liveness check
cat << EOF > pod-liveness-tcp.yaml
apiVersion: v1
kind: Pod
metadata:
  name: liveness-readiness-tcp
  labels:
    app: liveness-readiness-tcp
spec:
  containers:
  - name: liveness-readiness-tcp
    image: python:2.7
    imagePullPolicy: IfNotPresent
    command: ["bash", "-c", "echo test > index.html && sleep 30 && python -m SimpleHTTPServer 8080"] 
    ports:
    - containerPort: 8080
    livenessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 15
      periodSeconds: 20
EOF
# Apply the pod
kubectl apply -f pod-liveness-tcp.yaml 
# Access port 8080
curl 10.42.3.39:8080

# Watch the pod's status during the first 30 seconds
kubectl get pod --watch           

7.2 readinessProbe + livenessProbe + service

# Liveness and readiness probes
cat << EOF > service-healthcheck.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: service-health
spec:
  replicas: 2
  selector:
    matchLabels:
      app: service-health
  template:
    metadata:
      labels:
        app: service-health
    spec:
      containers:
      - name: service-health
        image: python:2.7
        imagePullPolicy: IfNotPresent
        command: ["/bin/bash","-c","echo \$(hostname) > index.html && sleep 30 && python -m SimpleHTTPServer 8080"]
        ports:
        - containerPort: 8080
        readinessProbe:
          tcpSocket:
            port: 8080
          initialDelaySeconds: 35
          periodSeconds: 10
        livenessProbe:
          tcpSocket:
            port: 8080
          initialDelaySeconds: 10
          periodSeconds: 10
EOF

# Apply the Deployment
kubectl apply -f service-healthcheck.yaml
# Create the service
kubectl expose deployment service-health
# View the service
kubectl get service
# Test via the service CLUSTER-IP + port
curl 10.109.21.81:8080
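Readiness gates service traffic: during the first ~35 seconds the pods are not Ready, so the service's endpoint list stays empty (a sketch):

# Watch the endpoints fill in once the readiness probe passes
kubectl get endpoints service-health --watch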

7.3 Cleaning up

# Delete the resources created from the YAML files in the current directory
kubectl delete -f .

kubectl delete service service-health 

8. Assigning Pods to Nodes

8.1 Specifying nodeName

# Create a directory
mkdir node
cd node
# Pin the pod to a specific node
cat <<EOF> nodeName.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  nodeName: node01
EOF
# Create from the YAML file
kubectl create -f nodeName.yaml
# Check that the pod landed on the specified nodeName
kubectl get pods -o wide

8.2 nodeSelector

8.2.1 Labeling a node

# Add a label to node02
kubectl label nodes node02 disktype=ssd
# View node02's labels
kubectl get nodes node02 --show-labels 
# NAME     STATUS   ROLES    AGE     VERSION   LABELS
# node02   Ready    <none>   6d16h   v1.12.2       beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=ssd,kubernetes.io/hostname=node02

8.2.2 Creating a pod with a nodeSelector

cat << EOF > pod-nodeSelector.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-selector
  labels:
    env: test
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  nodeSelector:
    disktype: ssd
EOF
# Create from the YAML file
kubectl apply -f pod-nodeSelector.yaml
# Check that the pod landed on a node matching the nodeSelector
kubectl get pods -o wide

8.3 Affinity and anti-affinity

8.3.1 Node affinity

# Label node01 with app=nginx1
kubectl label nodes node01 app=nginx1
# Hard requirement
cat << EOF > node-affinity-required.yaml
apiVersion: v1
kind: Pod
metadata:
  name: node-affinity-required
  labels:
    app: nginx1
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: app
            operator: In
            values:
            - nginx1
  containers:
  - name: with-node-affinity
    image: nginx
    imagePullPolicy: IfNotPresent
EOF
# Create from the YAML file
kubectl create -f node-affinity-required.yaml
# Check which node the pod was scheduled to
kubectl get pods -o wide
# Soft preference
cat << EOF > node-affinity-preferred.yaml
apiVersion: v1
kind: Pod
metadata:
  name: node-affinity-preferred
spec:
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:                                    
      - weight: 1                                                                       
        preference:
          matchExpressions:
          - key: disktype
            operator: In
            values:
            - ssd
  containers:
  - name: with-node-affinity
    image: nginx
    imagePullPolicy: IfNotPresent
EOF
# preferredDuringSchedulingIgnoredDuringExecution expresses a soft preference
# weight sets the score; higher totals get priority
# Create from the YAML file
kubectl create -f node-affinity-preferred.yaml
# Check whether the pod landed on the node labeled disktype=ssd
kubectl get pods -o wide

# Hard requirement combined with a preference
kubectl label nodes node02 app=nginx2

cat << EOF > node-affinity.yaml
apiVersion: v1
kind: Pod
metadata:
  name: with-node-affinity
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: app
            operator: In
            values:
            - nginx1
            - nginx2
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        preference:
          matchExpressions:
          - key: disktype
            operator: In
            values:
            - ssd
  containers:
  - name: with-node-affinity
    image: nginx
    imagePullPolicy: IfNotPresent
EOF
# Create from the YAML file
kubectl create -f node-affinity.yaml
# Check that the placement matches the configured rules
kubectl get pods -o wide

8.3.2 Pod affinity and anti-affinity

cat << EOF > pod-affinity.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis-cache
spec:
  selector:
    matchLabels:
      app: store
  replicas: 3
  template:
    metadata:
      labels:
        app: store
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - nginx1
            # The topology key decides which nodes count as the same location
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: redis-server
        image: nginx
        imagePullPolicy: IfNotPresent
EOF
# Create from the YAML file
kubectl create -f pod-affinity.yaml
cat << EOF > pod-affinity2.yaml
# Create the web-server
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-server
spec:
  selector:
    matchLabels:
      app: web-store
  replicas: 3
  template:
    metadata:
      labels:
        app: web-store
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - web-store
            topologyKey: "kubernetes.io/hostname"
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - store
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: web-app
        image: nginx
        imagePullPolicy: IfNotPresent
EOF
# Create from the YAML file
kubectl create -f pod-affinity2.yaml
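A quick check: anti-affinity spreads each app across nodes while the affinity rule pairs every web-store pod with a store pod, so each node should hold one of each (a sketch):

# Group pods by node to verify co-location
kubectl get pods -o wide --sort-by=.spec.nodeName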

8.4 Cleaning up

# Delete the resources created from the YAML files in the current directory
kubectl delete -f .
kubectl label nodes node01 app-
kubectl label nodes node02 app-
kubectl label nodes node02 disktype-

9. Taints and Tolerations

9.1 Taints

# Add a taint
kubectl taint node node01 app=test:NoSchedule

# Describe node01; the Taints field shows the taint
kubectl describe nodes node01

# Remove the taint
kubectl taint node node01 app:NoSchedule-
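Taints across all nodes can be listed in one shot with jsonpath (a sketch):

# Print each node's name and taints
kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.taints}{"\n"}{end}'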

9.2 Tolerations

# Run application replicas on the master nodes as well
cat <<EOF> fluentd.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-elasticsearch
  labels:
    k8s-app: fluentd-logging
spec:
  selector:
    matchLabels:
      name: fluentd-elasticsearch
  template:
    metadata:
      labels:
        name: fluentd-elasticsearch
    spec:
      tolerations:
      - key: node-role.kubernetes.io/controlplane
        operator: "Exists"
        effect: NoSchedule
      - key: node-role.kubernetes.io/etcd
        operator: "Exists"
        effect: NoExecute
      containers:
      - name: fluentd-elasticsearch
        image: nginx
        imagePullPolicy: IfNotPresent
EOF

# Create from the YAML file
kubectl create -f fluentd.yaml

# Check whether pods were scheduled onto the master nodes too
kubectl get pods -o wide
### Taint the nodes
kubectl taint node node01 ssd=:NoSchedule
kubectl taint node node02 app=nginx-1.9.0:NoSchedule
kubectl taint node node02 test=test:NoSchedule

### Create pods with tolerations
cat << EOF > tolerations-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test1
spec:
  tolerations:
  - key: "ssd"
    operator: "Exists"                     
    effect: "NoSchedule"
  containers:
  - name: demo
    image: polinux/stress
    imagePullPolicy: IfNotPresent
    command: ["stress"]
    args: ["--vm", "1", "--vm-bytes", "150M", "--vm-hang", "1"]
---
apiVersion: v1
kind: Pod
metadata:
  name: test2
spec:
  tolerations:
  - key: "app"
    operator: "Equal"
    value: "nginx-1.9.0"
    effect: "NoSchedule"
  - key: "test"
    operator: "Equal"
    value: "test"
    effect: "NoSchedule"
  containers:
  - name: demo
    image: polinux/stress
    imagePullPolicy: IfNotPresent
    command: ["stress"]
    args: ["--vm", "1", "--vm-bytes", "150M", "--vm-hang", "1"]
EOF
        
# Create from the YAML file
kubectl create -f tolerations-pod.yaml

# Check where the pods were scheduled
kubectl get pods -o wide

9.3 Cleaning up

# Delete the resources created from the YAML files in the current directory
kubectl delete -f .
kubectl taint node node01 ssd:NoSchedule-
kubectl taint node node02 app:NoSchedule-
kubectl taint node node02 test:NoSchedule-

10. Application Autoscaling

10.1 Deploying a Deployment with resource requests

# Write the Deployment
cat << EOF > nginx-resources.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-service
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-pod
  template:
    metadata:
      labels:
        app: nginx-pod
    spec:
      containers:
      - name: nginx
        image: nginx:1.9.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        resources:
          requests:
            cpu: "100m"
EOF
# Create from the YAML file
kubectl create -f nginx-resources.yaml 
# kubectl expose deployment nginx-service
kubectl expose deployment nginx-service --target-port=80 --port=80 --type=ClusterIP
# Create an HPA
kubectl autoscale deployment nginx-service --max=10 --min=1 --cpu-percent=80
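The autoscale command above is shorthand for creating an HPA object; a YAML sketch of the equivalent autoscaling/v1 resource:

cat << EOF > hpa.yaml
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: nginx-service
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nginx-service
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 80
EOF
# kubectl apply -f hpa.yaml (instead of kubectl autoscale)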

10.2 Adding CPU load to verify scale-out

# Open another terminal to monitor
watch -n1 'kubectl top pod && kubectl get hpa && kubectl get pod'

# Install the WebBench load-testing tool
yum -y install gcc git
git clone https://github.com/EZLippi/WebBench.git
cd WebBench 
make;make install

# Load test; scaling ramps up gradually rather than jumping straight to the maximum (1000 concurrent clients for 180 seconds; the IP is the service's cluster IP)
webbench -c 1000 -t 180 http://10.43.77.199/

10.3 Cleaning up

# Delete the resources created from the YAML files in the current directory
kubectl delete -f .

kubectl delete horizontalpodautoscalers.autoscaling nginx-service 

11. Cluster Security Configuration and Management

11.1 Namespace resource quotas

# Create a namespace
kubectl create namespace compute
# Create a compute resource quota
cat << EOF > compute-resources.yaml 
apiVersion: v1
kind: ResourceQuota
metadata:
  name: compute
  namespace: compute
spec:
  hard:
    cpu: 5
    memory: 2Gi
    pods: "10"
    services: "2"
    requests.cpu: 500m
    limits.memory: 1Gi
    persistentvolumeclaims: "2"
    services.nodeports: "2"
    configmaps: "2"
    secrets: "2"
EOF

# Create the ResourceQuota
kubectl create -f compute-resources.yaml 

# View the quota's latest usage
kubectl -n compute describe resourcequotas compute 

# Create a pod to test the namespace's quota
cat << EOF > test-mem-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: memory-demo
  namespace: compute
spec:
  containers:
  - name: memory-demo-2-ctr
    image: polinux/stress
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        memory: "50Mi"
        cpu: "0.5"
      limits:
        memory: "100Mi"
        cpu: "0.5"
    command: ["stress"]
    args: ["--vm", "1", "--vm-bytes", "50M", "--vm-hang", "1"]
EOF
# Create the pod
kubectl create -f test-mem-demo.yaml

# View pods (-n selects the namespace)
kubectl get pods -n compute

11.2 Application resource limits

11.2.1 Memory limits

cat << EOF > stress-mem.yaml
apiVersion: v1
kind: Pod
metadata:
  name: memory-demo
spec:
  containers:
  - name: memory-demo-2-ctr
    image: polinux/stress
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        memory: "50Mi"
      limits:
        memory: "100Mi"
    command: ["stress"]
    args: ["--vm", "1", "--vm-bytes", "250M", "--vm-hang", "1"]
EOF

# Create the pod
kubectl apply -f stress-mem.yaml

# The pod's status turns OOMKilled: the container tried to allocate more memory than its limit and was killed
kubectl get pod
# NAME          READY   STATUS      RESTARTS   AGE
# memory-demo   0/1     OOMKilled   2          20s

# View pod details
kubectl describe pods memory-demo 

11.2.2 CPU limits

cat << EOF > stress-cpu.yaml
apiVersion: v1
kind: Pod
metadata:
  name: cpu-demo
spec:
  containers:
  - name: cpu-demo-ctr
    image: vish/stress
    resources:
      limits:
        cpu: "1"
      requests:
        cpu: "0.5"
    args:
    - -cpus
    - "2"
EOF

# Create the pod
kubectl create -f stress-cpu.yaml
# View CPU usage
kubectl top pod cpu-demo 
# NAME       CPU(cores)   MEMORY(bytes)   
# cpu-demo   1001m        0Mi 

11.3 LimitRange

# Create a namespace named default-cpu-example
kubectl create namespace default-cpu-example
# Set default request and limit values for pods in it
cat << EOF > stress-cpu-limit.yaml
apiVersion: v1
kind: LimitRange
metadata:
  name: cpu-limit-range
  namespace: default-cpu-example
spec:
  limits:
  - default:
      cpu: 1
    defaultRequest:
      cpu: 0.5
    type: Container
EOF

# Create from the YAML file
kubectl create -f stress-cpu-limit.yaml
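The configured defaults can be confirmed before testing (a sketch):

# Show the default request/limit the LimitRange will inject
kubectl -n default-cpu-example describe limitranges cpu-limit-range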

Scenario 1: no resources configured

cat << EOF > default-cpu-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: default-cpu-demo
  namespace: default-cpu-example
spec:
  containers:
  - name: default-cpu-demo-ctr
    image: nginx
EOF

# Deploy the pod
kubectl apply -f default-cpu-demo.yaml

# View the pod's YAML (-o output format, -n namespace)
kubectl get pod -n default-cpu-example default-cpu-demo  -o yaml
# The resources block now carries a limit and request filled in automatically, inherited from the LimitRange

Scenario 2: configure only the limit in the pod's resources, not the request, and create a pod from the following YAML

cat << EOF > default-cpu-demo2.yaml
apiVersion: v1
kind: Pod
metadata:
  name: default-cpu-demo-2
  namespace: default-cpu-example
spec:
  containers:
  - name: default-cpu-demo-2-ctr
    image: nginx
    resources:
      limits:
        cpu: "1"
EOF
# Deploy the pod
kubectl apply -f default-cpu-demo2.yaml

# View the pod's YAML
kubectl get pod -n default-cpu-example default-cpu-demo-2 -o yaml
# Note: the request does not inherit the LimitRange default of 0.5; it is set equal to the limit.

Scenario 3: specify the container's request but not its limit, and create a pod from the following YAML

cat << EOF > default-cpu-demo3.yaml
apiVersion: v1
kind: Pod
metadata:
  name: default-cpu-demo-3
  namespace: default-cpu-example
spec:
  containers:
  - name: default-cpu-demo-3-ctr
    image: nginx
    resources:
      requests:
        cpu: "0.75"
EOF
# Deploy the pod
kubectl apply -f default-cpu-demo3.yaml

# View the pod's YAML
kubectl get pod -n default-cpu-example default-cpu-demo-3  -o yaml
# Note: the request keeps the value set in the pod rather than the LimitRange default, while the limit inherits the LimitRange value

# Clean up
kubectl delete -f .
kubectl delete namespaces compute default-cpu-example

Summary:
● If a pod sets neither request nor limit, it inherits the LimitRange defaults configured in the namespace.
● If a pod sets only the limit, the request does not inherit the LimitRange default; it is set equal to the limit.
● If a pod sets only the request, the request keeps the pod's value and the limit comes from the namespace's LimitRange.

11.4 Network Policies

11.4.1 NetworkPolicy isolation within a namespace

# Create a namespace
kubectl create namespace policy-demo

# Open another terminal to watch pod and service status
watch -n1 'kubectl get pods -n policy-demo -o wide && kubectl get service -n policy-demo'

# Create the pod
kubectl run --namespace=policy-demo nginx --replicas=1 --image=nginx --image-pull-policy=IfNotPresent --labels=app=nginx

# Create the service
kubectl expose --namespace=policy-demo deployment nginx --port=80

# Run a busybox
kubectl -n policy-demo run test --image=busybox:1.28 --image-pull-policy=IfNotPresent --labels=app=busybox --command -- sleep 3600000
# Test access (substitute the actual pod name for test-XXX)
kubectl -n policy-demo exec -it test-6778df478f-gnkp8 -- wget -q nginx -O -

Result: access works

# Create the NetworkPolicy rule
cat <<EOF> default-deny.yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: default-deny
  namespace: policy-demo
spec:
  podSelector:
    matchLabels: {}
EOF
# Apply the network policy
kubectl create -f default-deny.yaml

# This policy denies all connections to pods in the policy-demo namespace
# Test
kubectl -n policy-demo exec -it test-6778df478f-gnkp8 -- wget -q nginx -O -
# Access is now denied
# wget: Download time out
# Add an allow rule
cat <<EOF> access-nginx.yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: access-nginx
  namespace: policy-demo
spec:
  podSelector:
    matchLabels:
      app: nginx
  ingress:
    - from:
      - podSelector:
          matchLabels:
            app: busybox
EOF
# This rule allows pods labeled app: busybox to access pods labeled app: nginx in the policy-demo namespace
# Apply the access policy
kubectl create -f  access-nginx.yaml

# Test again; access succeeds
kubectl -n policy-demo exec -it test-6bdbf87b87-cx5r7 -- wget -q nginx -O -
# Result: access succeeds

# Create a pod without the app: busybox label and test access
kubectl -n policy-demo run demo --image=busybox:1.28 --image-pull-policy=IfNotPresent --labels=app=test --command -- sleep 3600000

kubectl -n policy-demo exec -it demo-6778df478f-gnkp8 -- wget -q nginx -O -
# Result: wget: Download time out
# Conclusion: within a namespace, NetworkPolicy can restrict pod-to-pod access

# Clean up
kubectl delete namespace policy-demo

11.4.2 Pod isolation across namespaces

Create two namespaces, policy-demo and policy-demo2. In policy-demo create an nginx pod with its service plus a busybox, and in policy-demo2 create another busybox; then access the nginx in policy-demo from the busybox in each namespace.

kubectl create namespace policy-demo
kubectl create namespace policy-demo2

# Open another terminal to watch pod and service status
watch -n1 'kubectl get pods -n policy-demo -o wide && kubectl get pods -n policy-demo2 -o wide && kubectl get service -n policy-demo'
kubectl run --namespace=policy-demo nginx --replicas=1 --image=nginx --image-pull-policy=IfNotPresent --labels=app=nginx

kubectl expose --namespace=policy-demo deployment nginx --port=80
kubectl -n policy-demo2 run test --image=busybox:1.28 --image-pull-policy=IfNotPresent --labels=app=busybox --command -- sleep 3600000
# Before any NetworkPolicy is set, accessing nginx from the busybox in either namespace succeeds.
# Note: access from policy-demo2 must append the namespace to the service name
kubectl -n policy-demo2 exec -it test-6778df478f-gnkp8 -- wget -q nginx.policy-demo -O -
# Configure the NetworkPolicy
cat <<EOF> default-deny.yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: default-deny
  namespace: policy-demo
spec:
  podSelector:
    matchLabels: {}
EOF

# Apply the NetworkPolicy
kubectl create -f default-deny.yaml
# With the deny-all policy in place, the busybox in policy-demo2 can no longer connect
kubectl -n policy-demo2 exec -it test-6778df478f-gnkp8 -- wget -q nginx.policy-demo -O -

# Allow app: busybox pods in policy-demo2 to access the app: nginx service in policy-demo (note: the two from entries below are OR-ed, matching any pod in a namespace labeled project=policy-demo2, or any app: busybox pod within policy-demo itself)
cat <<EOF> namespace-access.yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: namespace-access
  namespace: policy-demo
spec:
  podSelector:
    matchLabels:
      app: nginx
  ingress:
    - from:
      - namespaceSelector:
          matchLabels:
            project: policy-demo2
      - podSelector:
          matchLabels:
            app: busybox
EOF
# Apply the NetworkPolicy
kubectl create -f namespace-access.yaml

# Test access
kubectl -n policy-demo2 exec -it test-6bdbf87b87-6vxkc -- wget -q nginx.policy-demo -O -
# Result: wget: Download time out
# - namespaceSelector:
#     matchLabels:
#       project: policy-demo2

# The namespaceSelector matches labels on the namespace object itself; for cross-namespace access to work, the namespace must carry the label listed in matchLabels.
kubectl label namespaces policy-demo2 project=policy-demo2
# Test again
kubectl -n policy-demo2 exec -it test-6bdbf87b87-6vxkc -- wget -q nginx.policy-demo -O -
# Access now works

11.5 Cleaning up

# Delete the namespaces that were created
kubectl delete namespace policy-demo policy-demo2

12. Data Storage

12.1 Creating a Pod that uses a Volume

# Watch pods, PVs, and PVCs
watch -n1 'kubectl get pods -o wide && kubectl get pv && kubectl get pvc'

# Create a pod with a hostPath volume
cat << EOF > test-volume.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: test-container
    volumeMounts:
    - mountPath: /test-pd
      name: test-volume
  volumes:
  - name: test-volume
    hostPath:
      path: /data
EOF
# Create from the YAML file
kubectl create -f test-volume.yaml
# /test-pd in the container maps to /data on the host
kubectl exec -it test-pd bash
cd test-pd
echo 12345 > index.html

# On the node the pod was scheduled to, check the /data directory
cd /data
ls

12.2 PV and PVC

12.2.1 PV

# kubectl get pv

cat << EOF > pv.yaml
kind: PersistentVolume
apiVersion: v1
metadata:
  name: task-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/storage/pv1"
EOF
# Apply the YAML file
kubectl apply -f pv.yaml
# View PVs
kubectl get pv
# NAME             CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
# task-pv-volume   10Gi       RWO            Retain           Available           manual                  7s

12.2.2 PVC

cat << EOF > pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: task-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
EOF
# The PVC's requested storage must not exceed the PV's capacity, or the claim stays Pending
kubectl apply -f pvc.yaml

# View PVs; the volume is now Bound
kubectl get pv
# NAME             CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                   STORAGECLASS   REASON   AGE
# task-pv-volume   10Gi       RWO            Retain           Bound    default/task-pv-claim   manual                  2m17s

# View PVCs; the claim is now Bound
kubectl get pvc
# NAME            STATUS   VOLUME           CAPACITY   ACCESS MODES   STORAGECLASS   AGE
# task-pv-claim   Bound    task-pv-volume   10Gi       RWO            manual         9s

# Create a pod that uses the PVC
cat << EOF > pod-pv.yaml
kind: Pod
apiVersion: v1
metadata:
  name: task-pv-pod
spec:
  volumes:
    - name: task-pv-storage
      persistentVolumeClaim:
        claimName: task-pv-claim
  containers:
    - name: task-pv-container
      image: nginx
      imagePullPolicy: IfNotPresent
      ports:
        - containerPort: 80
          name: "http-server"
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: task-pv-storage
EOF
# Apply the YAML file
kubectl apply -f pod-pv.yaml 
# Run on the master node
kubectl exec -it task-pv-pod bash
cd /usr/share/nginx/html/
touch index.html
echo 11 > index.html 
exit
# Access the pod
curl 192.168.1.41

# Check which node the pod was assigned to
kubectl get pods -o wide
# If the pod runs on node01, inspect the hostPath on node01
cd /storage/
ls
# pv1
cd pv1/
ls
# index.html

# Clean up
kubectl delete -f .
# Directories created on the nodes must be deleted on those nodes if cleanup is needed
rm -rf /data
rm -rf /storage

12.3 StorageClass

12.3.1 Preparing the NFS environment

# Install the NFS server on rancher-server
yum -y install nfs-utils

# Start the service and enable it at boot
systemctl enable --now nfs
# Create the shared directory
mkdir /storage

# Edit the NFS exports file
sudo vim /etc/exports
/storage *(rw,sync,no_root_squash)

# Restart the service
systemctl restart nfs

# Install on the kubernetes compute (node) machines
yum -y install nfs-utils

# Test from a compute node
mkdir /test
mount.nfs 172.24.228.243:/storage /test
touch /test/123
# Check on rancher-server that the file appears under /storage

# Check which filesystem backs /test
df -m
# Unmount the directory
umount /test
# After unmounting, the test directory can be deleted
rm -rf /test

12.3.2 Deploying the StorageClass provisioner

mkdir nfs
# Download the provisioner:
# yum -y install git
# Downloads are slow from servers in mainland China; copy from demo/nfs-client instead
# git clone https://github.com/kubernetes-incubator/external-storage.git
# Edit the YAML files
# cd /root/yaml/nfs/external-storage/nfs-client/deploy
vi class.yaml
vi deployment.yaml
vi rbac.yaml

# In deployment.yaml, set the NFS server IP and path
vim deployment.yaml
         env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 172.24.228.243
            - name: NFS_PATH
              value: /storage
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.24.228.243
            path: /storage

# Deploy the provisioner
kubectl apply -f rbac.yaml
kubectl apply -f deployment.yaml
kubectl apply -f class.yaml

# View storage classes
kubectl get storageclasses.storage.k8s.io 
# NAME                  PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
# managed-nfs-storage   fuseim.pri/ifs   Delete          Immediate           false                  42s

12.3.3 Testing

##### Test 1: creating a PVC automatically provisions and binds a PV
cat << EOF > pvc-nfs.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-test
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: managed-nfs-storage
  resources:
    requests:
      storage: 1Gi
EOF
# Create from the YAML file
kubectl apply -f pvc-nfs.yaml
##### Test 2: creating a Pod automatically creates the PVC and PV
cat << EOF > statefulset-pvc-nfs.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: nginx # has to match .spec.template.metadata.labels
  serviceName: "nginx"
  replicas: 3 # by default is 1
  template:
    metadata:
      labels:
        app: nginx # has to match .spec.selector.matchLabels
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      accessModes: [ "ReadWriteMany" ]
      storageClassName: "managed-nfs-storage"
      resources:
        requests:
          storage: 1Gi
EOF
# Create from the YAML file
kubectl apply -f statefulset-pvc-nfs.yaml
# Test 3: make the NFS storage class the default, create a Pod without specifying a storage class, and check whether its PVC is provisioned
# Set managed-nfs-storage as the default
kubectl patch storageclass managed-nfs-storage -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
# Test with a YAML file that does not specify a storage class
cat <<EOF> statefulset2.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web2
spec:
  selector:
    matchLabels:
      app: nginx # has to match .spec.template.metadata.labels
  serviceName: "nginx"
  replicas: 2 # by default is 1
  template:
    metadata:
      labels:
        app: nginx # has to match .spec.selector.matchLabels
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: html
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
EOF
# Create from the YAML file
kubectl apply -f statefulset2.yaml

# Clean up
kubectl delete -f .

12.4 ConfigMap

12.4.1 Use as environment variables

# Create a ConfigMap of env values from YAML
cat << EOF > configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-config
data:
  username: lixw
  password: wiseda@1234
EOF
# Apply the YAML file
kubectl apply -f configmap.yaml
# List configmaps
kubectl get configmaps
# View test-config details
kubectl get configmaps test-config -o yaml
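The same ConfigMap can also be created imperatively, without a YAML file (a sketch, equivalent to the manifest above):

# Create the ConfigMap from literals
kubectl create configmap test-config --from-literal=username=lixw --from-literal=password=wiseda@1234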
### A pod consuming the ConfigMap as environment variables
cat << EOF > config-pod-env1.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-configmap-env-pod
spec:
  containers:
    - name: test-container
      image: radial/busyboxplus
      imagePullPolicy: IfNotPresent
      command: [ "/bin/sh", "-c", "sleep 1000000" ]
      envFrom:
      - configMapRef:
          name: test-config
EOF
# Apply the YAML file
kubectl apply -f config-pod-env1.yaml

# View the environment variables inside the container
kubectl exec -it test-configmap-env-pod -- env

### Using the ConfigMap values in the pod's command line
cat << EOF > config-pod-env2.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-configmap-command-env-pod
spec:
  containers:
    - name: test-container
      image: radial/busyboxplus
      imagePullPolicy: IfNotPresent
      command: [ "/bin/sh", "-c", "echo \$(MYSQLUSER) \$(MYSQLPASSWD); sleep 1000000" ]
      env:
        - name: MYSQLUSER
          valueFrom: 
            configMapKeyRef: 
              name: test-config
              key: username
        - name: MYSQLPASSWD
          valueFrom:
            configMapKeyRef: 
              name: test-config
              key: password
EOF
# Apply the YAML file
kubectl apply -f config-pod-env2.yaml
# View the logs of test-configmap-command-env-pod
kubectl logs test-configmap-command-env-pod 

12.4.2 Use via volume mounts

# Create a ConfigMap from a config file
echo 123 > index.html
kubectl create configmap web-config --from-file=index.html

# Mount the ConfigMap into a pod as a volume
cat << EOF > test-configmap-volume-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-configmap-volume-pod
spec:
  volumes:
    - name: config-volume
      configMap:
        name: web-config
  containers:
    - name: test-container
      image: nginx
      imagePullPolicy: IfNotPresent
      volumeMounts:
      - name: config-volume
        mountPath: /usr/share/nginx/html
EOF
# Apply the YAML file
kubectl apply -f test-configmap-volume-pod.yaml
# Test access
curl 10.42.4.12

# Inspect the files under html
kubectl exec -it test-configmap-volume-pod bash
cd /usr/share/nginx/html/
ls
# index.html
exit
# Using subPath
cat << EOF > test-configmap-subpath.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-configmap-volume-subpath-pod
spec:
  volumes:
    - name: config-volume
      configMap:
        name: web-config
  containers:
    - name: test-container
      image: nginx
      imagePullPolicy: IfNotPresent
      volumeMounts:
      - name: config-volume
        mountPath: /usr/share/nginx/html/index.html
        subPath: index.html
EOF
# Apply the YAML file
kubectl apply -f test-configmap-subpath.yaml

kubectl exec -it test-configmap-volume-subpath-pod bash
cd /usr/share/nginx/html/
ls
# 50x.html index.html
# Clean up
kubectl delete -f .

12.5 Secret

12.5.1 Use as environment variables

##### Manual base64 encoding
echo -n 'admin' | base64
# YWRtaW4=

echo -n 'wiseda@1234' | base64
# d2lzZWRhQDEyMzQ=

##### Decoding
echo 'YWRtaW4=' | base64 --decode
# Output: admin

echo 'd2lzZWRhQDEyMzQ=' | base64 --decode
# Output: wiseda@1234
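kubectl can also do the base64 encoding for you; an imperative sketch equivalent to the YAML below:

# Create the secret from literals (values are base64-encoded automatically)
kubectl create secret generic mysecret-env --from-literal=username=admin --from-literal=password=wiseda@1234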
###### Create the secret from YAML
cat << EOF > secret-env.yaml
apiVersion: v1
kind: Secret
metadata:
  name: mysecret-env
type: Opaque
data:
  username: YWRtaW4=
  password: d2lzZWRhQDEyMzQ=
EOF
# Apply the YAML file
kubectl apply -f secret-env.yaml
# List secrets
kubectl get secrets
# View mysecret-env details
kubectl get secrets mysecret-env -o yaml


##### A pod consuming the secret as env vars
cat << EOF > secret-pod-env1.yaml
apiVersion: v1
kind: Pod
metadata:
  name: envfrom-secret
spec:
  containers:
  - name: envars-test-container
    image: nginx
    imagePullPolicy: IfNotPresent
    envFrom:
    - secretRef:
        name: mysecret-env
EOF
# Apply the YAML file
kubectl apply -f secret-pod-env1.yaml
# View the environment variables inside the container
kubectl exec -it envfrom-secret -- env

# Custom environment variable names
cat << EOF > secret-pod-env2.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-env-secret
spec:
  containers:
  - name: mycontainer
    image: radial/busyboxplus
    imagePullPolicy: IfNotPresent
    command: [ "/bin/sh", "-c", "echo \$(SECRET_USERNAME) \$(SECRET_PASSWORD); sleep 1000000" ]
    env:
      - name: SECRET_USERNAME
        valueFrom:
          secretKeyRef:
            name: mysecret-env
            key: username
      - name: SECRET_PASSWORD
        valueFrom:
          secretKeyRef:
            name: mysecret-env
            key: password
EOF
# Apply the YAML file
kubectl apply -f secret-pod-env2.yaml
# View the environment variables inside the container
kubectl exec -it pod-env-secret -- env
kubectl logs pod-env-secret


12.5.2 Volume mounts

##### Create a secret from a config file
kubectl create secret generic web-secret --from-file=index.html

##### Mount the secret as a volume
cat << EOF > pod-volume-secret.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-volume-secret
spec:
  containers:
  - name: pod-volume-secret
    image: nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: test-web
      mountPath: "/usr/share/nginx/html"
      readOnly: true
  volumes:
  - name: test-web
    secret:
      secretName: web-secret
EOF
# Apply the YAML file
kubectl apply -f pod-volume-secret.yaml
# Test (as before, the mount replaces everything under the html directory, including 50x.html)
curl 10.42.4.15

12.5.3 docker-registry secrets

12.5.3.1 Deploying Harbor (on rancher-server)
# Download Harbor on rancher-server
cd /opt
# Offline installer (600+ MB; downloads are slow from mainland China, so use a server in Hong Kong or abroad)
# wget https://github.com/goharbor/harbor/releases/download/v1.10.0/harbor-offline-installer-v1.10.0.tgz

# Download the online installer
wget https://docs.rancher.cn/download/harbor/harbor-online-installer-v1.10.2.tgz
# Extract
tar -xvf harbor-online-installer-v1.10.2.tgz
cd harbor
# Edit the Harbor config file
vim harbor.yml 
# Mainly: set hostname to the rancher-server IP, change the port to 8080, and comment out the https settings
hostname: 172.24.228.243
http:
  port: 8080
# https related config
# https:
  # https port for harbor, default is 443
  # port: 443
  # The path of cert and key files for nginx
  # certificate: /your/certificate/path
  # private_key: /your/private/key/path

# Download docker-compose
wget https://docs.rancher.cn/download/compose/v1.25.5-docker-compose-Linux-x86_64
chmod +x v1.25.5-docker-compose-Linux-x86_64 && mv v1.25.5-docker-compose-Linux-x86_64 /usr/local/bin/docker-compose

# harbor.yml must be edited in the Harbor install directory before running the installer
./install.sh

# Access via the rancher-server public IP on port 8080
http://47.113.197.219:8080
# After installation, log in with the default user/password admin/Harbor12345
# Create a user, e.g.:
# user: lixw
# password: Wiseda@1234
# Then create a test project and add the new user to it

# Edit docker's daemon.json to add the insecure private registry (needed on every node; scp can distribute it)
# scp /etc/docker/daemon.json node01:/etc/docker/daemon.json
# "insecure-registries": ["172.24.228.243:8080"],
systemctl restart docker
# If docker is restarted on rancher-server, restart Harbor afterwards
# docker-compose start

# docker login to the private registry
docker login 172.24.228.243:8080

# Push an image to the test project of user lixw
docker tag nginx:latest 172.24.228.243:8080/test/nginx:latest
docker push 172.24.228.243:8080/test/nginx:latest

# Log out
docker logout 172.24.228.243:8080


12.5.3.2 Creating the secret
kubectl create secret docker-registry harbor-secret --docker-server=172.24.228.243:8080 --docker-username=lixw --docker-password=Wiseda@1234 --docker-email=lixw@qq.com
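The resulting secret stores a .dockerconfigjson payload, which can be inspected to verify the registry and credentials (a sketch; the dot in the key is escaped in jsonpath):

# Decode the stored docker config
kubectl get secret harbor-secret -o jsonpath='{.data.\.dockerconfigjson}' | base64 --decode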

12.5.3.3 Creating a Pod that uses imagePullSecrets
cat << EOF > harbor-sc.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx-deploy
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-pod
  template:
    metadata:
      labels:
        app: nginx-pod  
    spec:
      containers:
      - name: nginx
        image: 172.24.228.243:8080/test/nginx:latest
      imagePullSecrets:
      - name: harbor-secret
EOF
# Create from the YAML file
kubectl create -f harbor-sc.yaml


12.6 emptyDir

12.6.1 emptyDir

cat << EOF > emptydir.yaml
apiVersion: v1
kind: Pod
metadata:
  name: emptydir-pod
  labels:
    app: myapp
spec:
  volumes:
  - name: storage
    emptyDir: {}
  containers:
  - name: myapp1
    image: radial/busyboxplus
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: storage
      mountPath: /storage
    command: ['sh', '-c', 'sleep 3600000']
  - name: myapp2
    image: radial/busyboxplus
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: storage
      mountPath: /storage
    command: ['sh', '-c', 'sleep 10000000']
EOF
# Apply the YAML file
kubectl apply -f emptydir.yaml
# View the pod
kubectl get pod
# NAME           READY   STATUS    RESTARTS   AGE
# emptydir-pod   2/2     Running   0          2m21s
# (Create a file inside /storage in the first container)
kubectl exec -it emptydir-pod -c myapp1 sh
cd /storage/
touch 123
exit
# (Log into the second container to see the file)
kubectl exec -it emptydir-pod -c myapp2 sh
ls /storage/
# 123

12.6.2 emptyDir + init containers

cat << EOF > initcontainers.yaml
apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
  labels:
    app: myapp
spec:
  volumes:
  - name: storage
    emptyDir: {}
  containers:
  - name: myapp-containers
    image: radial/busyboxplus
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: storage
      mountPath: /storage
    command: ['sh', '-c', 'if [ -f /storage/testfile ] ; then sleep 3600000 ; fi']
  initContainers:
  - name: init-containers
    image: radial/busyboxplus
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: storage
      mountPath: /storage
    command: ['sh', '-c', 'touch /storage/testfile && sleep 10']
EOF
# Apply the YAML file
kubectl apply -f initcontainers.yaml
# View pods
kubectl get pods
# NAME        READY   STATUS     RESTARTS   AGE
# myapp-pod   0/1     Init:0/1   0          8s

# Look at the files under /storage in the container
kubectl exec -it myapp-pod -- ls /storage/
# testfile

12.7 Cleaning up

kubectl delete -f .
# Delete the configmaps and secrets that were created
kubectl delete configmaps web-config
kubectl delete secrets web-secret harbor-secret


13. K8s Networking

13.1 The nginx-proxy on the node machines

# Run on a node (every node runs an nginx-proxy container)
docker exec -it nginx-proxy bash

cat /etc/nginx/nginx.conf 

# 
# worker_processes auto;
# events {
#   multi_accept on;
#  use epoll;
#   worker_connections 1024;
# }

# stream {
# Load-balancing config (round-robin by default; weight and ip_hash are alternatives)
#         upstream kube_apiserver {            
#             server 172.16.102.10:6443;            
#             server 172.16.102.12:6443;            
#             server 172.16.102.11:6443;            
#         }
#         server {
# Listen on local port 6443 and proxy to the servers in the kube_apiserver upstream
#             listen        6443;
#             proxy_pass    kube_apiserver;
#             proxy_timeout 30;
#             proxy_connect_timeout 2s;
#         }
# }


13.2 Pod, Service, and Ingress IPs

# Services provided in kube-system
kubectl get service -n kube-system 
# NAME             TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)                  AGE
# kube-dns         ClusterIP   10.43.0.10    <none>        53/UDP,53/TCP,9153/TCP   12m
# metrics-server   ClusterIP   10.43.61.74   <none>        443/TCP                  12m


kubectl get endpoints -n kube-system -o wide
# NAME                      ENDPOINTS                                            AGE
# kube-controller-manager   <none>                                               14m
# kube-dns                  10.42.3.2:53,10.42.4.2:53,10.42.3.2:53 + 3 more...   13m

# The kube-dns IP is written into every container's resolv.conf
# Pick any container or pod (e.g. exec into a container, confirming the container ID yourself: docker exec -it 3f7f53567efd sh)
# kubectl exec -it test-pd bash

cat /etc/resolv.conf 
# nameserver 10.43.0.10

# Service CLUSTER-IPs come from the same range as the kube-dns IP
kubectl get service -o wide
# efk-kibana  NodePort    10.43.76.233   <none>   5601:32549/TCP      62d   app=kibana,release=efk

# A pod's IP comes from the flannel subnet of the node it was scheduled to
ip a
kubectl get pods -o wide

# View ingresses (ADDRESS lists the nodes where nginx-ingress is installed)
kubectl get ingress
# NAME              HOSTS         ADDRESS                       PORTS   AGE
# metric-alert      w.wiseda.cn   172.16.102.11,172.16.102.12   80      71d
