
82 - Cloud-Native Operating System - Kubernetes Resource Object Management and Examples I

Author: mooreyxia

This chapter covers:

  1. Kubernetes layered architecture
  2. Kubernetes API
     1. Kubernetes built-in APIs
     2. Kubernetes custom APIs
  3. Introduction to the Kubernetes built-in resource objects
     1. Pod
     2. Job and CronJob
     3. RC/RS replica controllers
     4. Deployment controller
     5. Service
     6. Volume (storage volumes)

Kubernetes Resource Object Management and Examples
Resource object concepts
  • Kubernetes layered architecture


  • Kubernetes API
  • Follows the RESTful API design philosophy
  • Declarative and object-oriented
  • Standardized expression format (request paths follow consistent conventions)

  • Built-in APIs
    Query the APIs available in the cluster
#Note: the set of available APIs changes between Kubernetes versions
#List the built-in API resources
[root@K8s-ansible ~]#kubectl api-resources 
NAME                              SHORTNAMES   APIVERSION                             NAMESPACED   KIND
componentstatuses                 cs           v1                                     false        ComponentStatus
....

#Query the APIs the current credentials are authorized to call
#Syntax:
# curl --cacert /etc/kubernetes/ssl/ca.pem -H "Authorization: Bearer TOKEN" https://127.0.0.1:6443
# curl --cacert /etc/kubernetes/ssl/ca.pem -H "Authorization: Bearer TOKEN" https://127.0.0.1:6443/api
#Example
#1. Get the ServiceAccount and its token
[root@K8s-ansible ~]#kubectl get secrets -A|grep admin
kubernetes-dashboard   dashboard-admin-user              kubernetes.io/service-account-token   3      5d3h
kuboard                kuboard-admin-token               kubernetes.io/service-account-token   3      33h
[root@K8s-ansible ~]#kubectl describe secrets dashboard-admin-user -n kubernetes-dashboard |grep token
Type:  kubernetes.io/service-account-token
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6ImhIMHhCWW1iOFRhbXNjdDAyQUg5YVE3RUVuRjNxTDZReXhnUzJqbnRpTzQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdXNlciIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJhZG1pbi11c2VyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZTAzYzUzZjQtZDE1OS00MDA4LTgwNGItOTcwOTEyZmU1NTZlIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmFkbWluLXVzZXIifQ.lVHgpVsH0G0Rsq-OLST8zTeH48GlLUZDcPTjYSAh1MnOFDhylKofJUjjv68t0nkQ71xZnsqEs89qekakC1UfkTmpRgbHjRVisYdPPqO7Y-D6RqDJUC_FMArPRZaTONta7ZKCs6j99zp8VrFB4BajBdNvpXJ1YsawCFE6ZNssVkL2Wjdy8mkpb8xYQX1XDrEvFaNHX67IRkcQDiF-k8rZeSOVvHlqzHKgeeg4OBblb2yNwVDc8X6FdmZXfTvA768t9rkmq1VJ4U2dRBmHAgMNZN5iD4YjNphNkCMzAZQJm4glkxvAD7nDpGX6CT_4boskv4jHOITbkXUjDPpf_VZyJg

#2. List all API paths available to the current user
[root@K8s-ansible ~]#curl --cacert /etc/kubernetes/ssl/ca.pem -H "Authorization: Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6ImhIMHhCWW1iOFRhbXNjdDAyQUg5YVE3RUVuRjNxTDZReXhnUzJqbnRpTzQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdXNlciIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJhZG1pbi11c2VyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZTAzYzUzZjQtZDE1OS00MDA4LTgwNGItOTcwOTEyZmU1NTZlIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmFkbWluLXVzZXIifQ.lVHgpVsH0G0Rsq-OLST8zTeH48GlLUZDcPTjYSAh1MnOFDhylKofJUjjv68t0nkQ71xZnsqEs89qekakC1UfkTmpRgbHjRVisYdPPqO7Y-D6RqDJUC_FMArPRZaTONta7ZKCs6j99zp8VrFB4BajBdNvpXJ1YsawCFE6ZNssVkL2Wjdy8mkpb8xYQX1XDrEvFaNHX67IRkcQDiF-k8rZeSOVvHlqzHKgeeg4OBblb2yNwVDc8X6FdmZXfTvA768t9rkmq1VJ4U2dRBmHAgMNZN5iD4YjNphNkCMzAZQJm4glkxvAD7nDpGX6CT_4boskv4jHOITbkXUjDPpf_VZyJg" https://192.168.11.241:6443
{
  "paths": [
    "/.well-known/openid-configuration",
    "/api",
    "/api/v1",
    "/apis",
    "/apis/",
    "/apis/admissionregistration.k8s.io",
    "/apis/admissionregistration.k8s.io/v1",
    "/apis/apiextensions.k8s.io",
    "/apis/apiextensions.k8s.io/v1",
    "/apis/apiregistration.k8s.io",
    "/apis/apiregistration.k8s.io/v1",
    "/apis/apps",
    "/apis/apps/v1",
    "/apis/authentication.k8s.io",
    "/apis/authentication.k8s.io/v1",
    "/apis/authorization.k8s.io",
    "/apis/authorization.k8s.io/v1",
    "/apis/autoscaling",
    "/apis/autoscaling/v1",
    "/apis/autoscaling/v2",
    "/apis/batch",
    "/apis/batch/v1",
    "/apis/certificates.k8s.io",
    "/apis/certificates.k8s.io/v1",
    "/apis/coordination.k8s.io",
    "/apis/coordination.k8s.io/v1",
    "/apis/discovery.k8s.io",
    "/apis/discovery.k8s.io/v1",
    "/apis/events.k8s.io",
    "/apis/events.k8s.io/v1",
    "/apis/flowcontrol.apiserver.k8s.io",
    "/apis/flowcontrol.apiserver.k8s.io/v1beta2",
    "/apis/flowcontrol.apiserver.k8s.io/v1beta3",
    "/apis/networking.k8s.io",
    "/apis/networking.k8s.io/v1",
    "/apis/node.k8s.io",
    "/apis/node.k8s.io/v1",
    "/apis/policy",
    "/apis/policy/v1",
    "/apis/rbac.authorization.k8s.io",
    "/apis/rbac.authorization.k8s.io/v1",
    "/apis/scheduling.k8s.io",
    "/apis/scheduling.k8s.io/v1",
    "/apis/storage.k8s.io",
    "/apis/storage.k8s.io/v1",
    "/apis/storage.k8s.io/v1beta1",
    "/apis/velero.io",
    "/apis/velero.io/v1",
    "/healthz",
    "/healthz/autoregister-completion",
    "/healthz/etcd",
    "/healthz/log",
    "/healthz/ping",
    "/healthz/poststarthook/aggregator-reload-proxy-client-cert",
    "/healthz/poststarthook/apiservice-openapi-controller",
    "/healthz/poststarthook/apiservice-openapiv3-controller",
    "/healthz/poststarthook/apiservice-registration-controller",
    "/healthz/poststarthook/apiservice-status-available-controller",
    "/healthz/poststarthook/bootstrap-controller",
    "/healthz/poststarthook/crd-informer-synced",
    "/healthz/poststarthook/generic-apiserver-start-informers",
    "/healthz/poststarthook/kube-apiserver-autoregistration",
    "/healthz/poststarthook/priority-and-fairness-config-consumer",
    "/healthz/poststarthook/priority-and-fairness-config-producer",
    "/healthz/poststarthook/priority-and-fairness-filter",
    "/healthz/poststarthook/rbac/bootstrap-roles",
    "/healthz/poststarthook/scheduling/bootstrap-system-priority-classes",
    "/healthz/poststarthook/start-apiextensions-controllers",
    "/healthz/poststarthook/start-apiextensions-informers",
    "/healthz/poststarthook/start-cluster-authentication-info-controller",
    "/healthz/poststarthook/start-kube-aggregator-informers",
    "/healthz/poststarthook/start-kube-apiserver-admission-initializer",
    "/healthz/poststarthook/start-kube-apiserver-identity-lease-controller",
    "/healthz/poststarthook/start-kube-apiserver-identity-lease-garbage-collector",
    "/healthz/poststarthook/start-legacy-token-tracking-controller",
    "/healthz/poststarthook/storage-object-count-tracker-hook",
    "/livez",
    "/livez/autoregister-completion",
    "/livez/etcd",
    "/livez/log",
    "/livez/ping",
    "/livez/poststarthook/aggregator-reload-proxy-client-cert",
    "/livez/poststarthook/apiservice-openapi-controller",
    "/livez/poststarthook/apiservice-openapiv3-controller",
    "/livez/poststarthook/apiservice-registration-controller",
    "/livez/poststarthook/apiservice-status-available-controller",
    "/livez/poststarthook/bootstrap-controller",
    "/livez/poststarthook/crd-informer-synced",
    "/livez/poststarthook/generic-apiserver-start-informers",
    "/livez/poststarthook/kube-apiserver-autoregistration",
    "/livez/poststarthook/priority-and-fairness-config-consumer",
    "/livez/poststarthook/priority-and-fairness-config-producer",
    "/livez/poststarthook/priority-and-fairness-filter",
    "/livez/poststarthook/rbac/bootstrap-roles",
    "/livez/poststarthook/scheduling/bootstrap-system-priority-classes",
    "/livez/poststarthook/start-apiextensions-controllers",
    "/livez/poststarthook/start-apiextensions-informers",
    "/livez/poststarthook/start-cluster-authentication-info-controller",
    "/livez/poststarthook/start-kube-aggregator-informers",
    "/livez/poststarthook/start-kube-apiserver-admission-initializer",
    "/livez/poststarthook/start-kube-apiserver-identity-lease-controller",
    "/livez/poststarthook/start-kube-apiserver-identity-lease-garbage-collector",
    "/livez/poststarthook/start-legacy-token-tracking-controller",
    "/livez/poststarthook/storage-object-count-tracker-hook",
    "/logs",
    "/metrics",
    "/openapi/v2",
    "/openapi/v3",
    "/openapi/v3/",
    "/openid/v1/jwks",
    "/readyz",
    "/readyz/autoregister-completion",
    "/readyz/etcd",
    "/readyz/etcd-readiness",
    "/readyz/informer-sync",
    "/readyz/log",
    "/readyz/ping",
    "/readyz/poststarthook/aggregator-reload-proxy-client-cert",
    "/readyz/poststarthook/apiservice-openapi-controller",
    "/readyz/poststarthook/apiservice-openapiv3-controller",
    "/readyz/poststarthook/apiservice-registration-controller",
    "/readyz/poststarthook/apiservice-status-available-controller",
    "/readyz/poststarthook/bootstrap-controller",
    "/readyz/poststarthook/crd-informer-synced",
    "/readyz/poststarthook/generic-apiserver-start-informers",
    "/readyz/poststarthook/kube-apiserver-autoregistration",
    "/readyz/poststarthook/priority-and-fairness-config-consumer",
    "/readyz/poststarthook/priority-and-fairness-config-producer",
    "/readyz/poststarthook/priority-and-fairness-filter",
    "/readyz/poststarthook/rbac/bootstrap-roles",
    "/readyz/poststarthook/scheduling/bootstrap-system-priority-classes",
    "/readyz/poststarthook/start-apiextensions-controllers",
    "/readyz/poststarthook/start-apiextensions-informers",
    "/readyz/poststarthook/start-cluster-authentication-info-controller",
    "/readyz/poststarthook/start-kube-aggregator-informers",
    "/readyz/poststarthook/start-kube-apiserver-admission-initializer",
    "/readyz/poststarthook/start-kube-apiserver-identity-lease-controller",
    "/readyz/poststarthook/start-kube-apiserver-identity-lease-garbage-collector",
    "/readyz/poststarthook/start-legacy-token-tracking-controller",
    "/readyz/poststarthook/storage-object-count-tracker-hook",
    "/readyz/shutdown",
    "/version"
  ]
}
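
Besides curl with a bearer token, kubectl can issue raw requests against the API server using the kubeconfig credentials it already has. A minimal sketch (the group/version paths are just examples, adjust to your cluster):

#Query a specific API group/version through kubectl instead of curl
kubectl get --raw /apis/apps/v1
kubectl get --raw /api/v1/namespaces/default/pods
#kubectl proxy opens a local, already-authenticated proxy to the API server
kubectl proxy --port=8001 &
curl http://127.0.0.1:8001/apis/batch/v1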
  • Built-in resource objects


  • Kubernetes resource object operation commands
#API documentation
https://kubernetes.io/zh/docs/concepts/workloads/controllers/deployment/


  • Resource object management example
kind: Deployment  #object type: a Deployment controller, see kubectl explain Deployment
apiVersion: apps/v1  #API version (extensions/v1beta1 has been removed in current releases), see kubectl explain Deployment.apiVersion
metadata: #metadata of the Deployment, see kubectl explain Deployment.metadata
  labels: #custom labels, see kubectl explain Deployment.metadata.labels
    app: linux36-nginx-deployment-label #label app=linux36-nginx-deployment-label, referenced later
  name: linux36-nginx-deployment #name of the Deployment
  namespace: linux36 #namespace of the Deployment, defaults to default
spec: #detailed definition of the Deployment, see kubectl explain Deployment.spec
  replicas: 1 #number of pod replicas to create, defaults to 1
  selector: #label selector
    matchLabels: #labels to match, must be set
      app: linux36-nginx-selector #target label to match
  template: #pod template, required; it describes the pods to be created
    metadata: #metadata of the template
      labels: #template labels, Deployment.spec.template.metadata.labels
        app: linux36-nginx-selector #must equal Deployment.spec.selector.matchLabels
    spec: #pod specification
      containers: #container list; a pod needs at least one container and containers cannot be added or removed dynamically
      - name: linux36-nginx-container #container name
        image: harbor.magedu.net/linux36/nginx-web1:v1 #image address
        #command: ["/apps/tomcat/bin/run_tomcat.sh"] #command or script executed when the container starts
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always #image pull policy
        ports: #container port list
        - containerPort: 80 #a container port
          protocol: TCP #port protocol
          name: http #port name
        - containerPort: 443 #another container port
          protocol: TCP #port protocol
          name: https #port name
        env: #environment variables
        - name: "password" #variable name, must be quoted
          value: "123456" #value of this variable
        - name: "age" #another variable name
          value: "18" #value of the other variable
        resources: #resource requests and limits
          limits: #upper limits
            cpu: 500m  #CPU limit in cores, e.g. 0.5 or 500m (CPU is a compressible resource)
            memory: 2Gi #memory limit in Mi/Gi, applied as the container runtime memory limit
          requests: #resource requests
            cpu: 200m #CPU request, the initial amount guaranteed at container start, e.g. 0.5 or 500m
            memory: 512Mi #memory request, the initial amount guaranteed at start, used when scheduling the pod

---
kind: Service #object type Service
apiVersion: v1 #Service API version, see kubectl explain Service.apiVersion
metadata: #Service metadata, see kubectl explain Service.metadata
  labels: #custom labels, see kubectl explain Service.metadata.labels
    app: linux36-nginx #label value of the Service
  name: linux36-nginx-spec #name of the Service; this name is resolvable via DNS
  namespace: linux36 #namespace the Service belongs to, i.e. where the Service is created
spec: #detailed definition of the Service, see kubectl explain Service.spec
  type: NodePort #Service type, defines how the service is accessed; defaults to ClusterIP, see kubectl explain Service.spec.type
  ports: #port definitions, see kubectl explain Service.spec.ports
  - name: http #port name
    port: 80 #Service port 80
    protocol: TCP #protocol
    targetPort: 80 #target port on the pod
    nodePort: 30001 #port exposed on every node
  - name: https #SSL port
    port: 443 #Service port 443
    protocol: TCP #protocol
    targetPort: 443 #target port on the pod
    nodePort: 30043 #SSL port exposed on every node
  selector: #label selector of the Service, selects the target pods
    app: linux36-nginx-selector #traffic is routed to the selected pods; must equal the pod labels in Deployment.spec.template.metadata.labels
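
A sketch of how such a manifest is typically applied and verified; the file name nginx-deployment-service.yaml is an assumption, and the image in the example points at a private registry, so substitute one your nodes can pull:

#The manifest references namespace linux36, so create it first
kubectl create namespace linux36
kubectl apply -f nginx-deployment-service.yaml
kubectl get deployment,pods,svc -n linux36 -o wide
#Any field commented above can be looked up with kubectl explain
kubectl explain Deployment.spec.template.spec.containers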
Resource objects in detail, with examples
  • Pod
#Overview
1. The pod is the smallest deployable unit in Kubernetes.
2. A pod can run a single container or multiple containers.
3. When a pod runs multiple containers, those containers are scheduled together as one unit.
4. Pods are short-lived entities: they do not self-heal and are discarded once finished.
5. Pods are normally created and managed through a controller.

#Example
[root@K8s-ansible case1-pod]#cat pod-test-case.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx:1.20.2-alpine
    ports:
    - containerPort: 80
  - name: redis
    image: redis:7.0-alpine 
    ports:
    - containerPort: 6379
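
A quick sketch for verifying the multi-container pod above (the pod name nginx comes from the manifest; -c selects which container inside the pod):

kubectl apply -f pod-test-case.yaml
kubectl get pod nginx -o wide          #READY should show 2/2 once both containers are up
kubectl logs nginx -c redis            #logs of the redis container in the pod
kubectl exec -it nginx -c nginx -- sh  #open a shell in the nginx container
kubectl delete pod nginx               #a bare pod is not recreated after deletion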
  • Job and CronJob
#A Job is a one-off task; a CronJob is a periodic task
#Jobs are used for environment initialization and data initialization (e.g. MySQL/Elasticsearch)
#CronJobs handle periodic work, for example refreshing tokens that some public-cloud resources require for every use

#Job example
[root@K8s-ansible case1-pod]#cat job-test-case.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: job-mysql-init
spec:
  template:
    spec:
      containers:
      - name: job-mysql-init-container
        image: centos:7.9.2009
        command: ["/bin/sh"]
        args: ["-c", "echo data init job at `date  %Y-%m-%d_%H-%M-%S` >> /cache/data.log"]
        volumeMounts:
        - mountPath: /cache
          name: cache-volume
      volumes:
      - name: cache-volume
        hostPath:
          path: /tmp/jobdata
      restartPolicy: Never

#CronJob example
[root@K8s-ansible case1-pod]#cat CronJob-test-case.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: cronjob-mysql-databackup
spec:
  schedule: "*/2 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: cronjob-mysql-databackup-pod
            image: centos:7.9.2009
            #imagePullPolicy: IfNotPresent
            command: ["/bin/sh"]
            args: ["-c", "echo mysql databackup cronjob at `date  %Y-%m-%d_%H-%M-%S` >> /cache/data.log"]
            volumeMounts: 
            - mountPath: /cache
              name: cache-volume
          volumes:
          - name: cache-volume
            hostPath:
              path: /tmp/cronjobdata
          restartPolicy: OnFailure
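
A minimal sketch for applying and checking the two cases above (the hostPath locations match the manifests; the log file only appears on whichever node actually ran the pod):

kubectl apply -f job-test-case.yaml -f CronJob-test-case.yaml
kubectl get jobs,cronjobs,pods -o wide   #the Job pod runs once; CronJob pods are created every 2 minutes
#On the node that ran the pods:
#  cat /tmp/jobdata/data.log
#  cat /tmp/cronjobdata/data.log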
  • RC/RS replica controllers
#Replication Controller: replica controller (selector supports = and !=) #first-generation pod replica controller
#ReplicaSet: replica controller; the difference from RC is the selector support (it also supports in and notin) #second-generation pod replica controller

#RC example
[root@K8s-ansible case1-pod]#cat RC-test-case.yaml
apiVersion: v1
kind: ReplicationController  
metadata:  
  name: ng-rc
spec:  
  replicas: 2
  selector:  
    app: ng-rc-80 
  template:   
    metadata:  
      labels:  
        app: ng-rc-80
    spec:  
      containers:  
      - name: ng-rc-80 
        image: nginx  
        ports:  
        - containerPort: 80 
        
#RS example
[root@K8s-ansible case1-pod]#cat RS-test-case.yaml
apiVersion: apps/v1 
kind: ReplicaSet
metadata:
  name: frontend
spec:
  replicas: 2
  selector:
    matchLabels:
      app: ng-rs-80
    #matchExpressions:
    #  - {key: app, operator: In, values: [ng-rs-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-rs-80
    spec:  
      containers:  
      - name: ng-rs-80 
        image: nginx:1.16.1
        ports:  
        - containerPort: 80
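
Both controllers are scaled and inspected the same way; a minimal sketch (<pod-name> is a placeholder for one of the pods listed):

kubectl apply -f RC-test-case.yaml -f RS-test-case.yaml
kubectl get rc,rs,pods -o wide --show-labels
kubectl scale rc ng-rc --replicas=3        #scale the ReplicationController
kubectl scale rs frontend --replicas=3     #scale the ReplicaSet
kubectl delete pod <pod-name>              #a deleted pod is recreated to keep the replica count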
  • Deployment controller
#Deployment: a controller one level above RS; besides the RS features it adds many advanced capabilities, most importantly rolling updates and rollbacks #third-generation pod controller
#Official documentation
https://kubernetes.io/zh/docs/concepts/workloads/controllers/deployment/

#Deployment example
[root@K8s-ansible case1-pod]#cat Deployment-test-case.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 2
  selector:
    #app: ng-deploy-80 #rc
    matchLabels: #rs or deployment
      app: ng-deploy-80
    #matchExpressions:
    #  - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.20.2
        ports:
        - containerPort: 80
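
The rolling-update and rollback features mentioned above can be exercised directly from kubectl; a minimal sketch (the new image tag is only an example):

kubectl apply -f Deployment-test-case.yaml
kubectl set image deployment/nginx-deployment ng-deploy-80=nginx:1.22.0  #trigger a rolling update
kubectl rollout status deployment/nginx-deployment
kubectl rollout history deployment/nginx-deployment
kubectl rollout undo deployment/nginx-deployment                         #roll back to the previous revision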
  • Service


  • A Service dynamically matches its backend endpoints through labels, decoupling the service from the application
  • kube-proxy watches the kube-apiserver; whenever a Service resource changes, kube-proxy adjusts the corresponding load-balancing rules so that the Service always reflects the latest state
  • kube-proxy has three proxy modes; this setup defaults to ipvs, with iptables as the alternative
#Service types:
 #ClusterIP: for in-cluster access to a service by its service name.
 #NodePort: lets clients outside the Kubernetes cluster actively reach services running inside the cluster.
 #LoadBalancer: for exposing services in public-cloud environments.
 #ExternalName: maps a service outside the Kubernetes cluster into the cluster, so that in-cluster pods can reach the external service through a fixed service name; it is also sometimes used to let pods in different namespaces reach each other via an ExternalName service.
#Example
[root@K8s-ansible case1-pod]#cat Service-test-case.yaml 
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
    #matchExpressions:
    #  - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.20.0
        ports:
        - containerPort: 80
      #nodeSelector:
      #  env: group1

#ClusterIP example - ClusterIP: used for access inside the Kubernetes cluster
[root@K8s-ansible case1-pod]#cat ClusterIP-test-case.yaml
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80 
spec:
  ports:
  - name: http
    port: 80
    targetPort: 80
    protocol: TCP
  #type: ClusterIP
  #clusterIP: 10.100.21.199
  selector:
    app: ng-deploy-80

#NodePort example - the same port is opened on every node; client requests hitting that port are forwarded to the Service, which then forwards them to the pods; used to expose in-cluster services to clients outside the Kubernetes environment
[root@K8s-ansible case1-pod]#cat NodePort-test-case.yaml
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80 
spec:
  ports:
  - name: http
    port: 80 #Service port
    targetPort: 80 #container (pod) port
    nodePort: 30012 #port exposed on the nodes
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80
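
A sketch for verifying the NodePort case (file names follow the cat commands above; <node-ip> is any node of the cluster):

kubectl apply -f Service-test-case.yaml -f NodePort-test-case.yaml
kubectl get svc ng-deploy-80 -o wide   #TYPE should be NodePort with PORT(S) 80:30012/TCP
kubectl get ep ng-deploy-80            #endpoints are the pod IPs matched by app=ng-deploy-80
#From outside the cluster, every node answers on the NodePort:
#  curl http://<node-ip>:30012/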
  • Volume (storage volumes)


#Official documentation
https://kubernetes.io/zh/docs/concepts/storage/volumes/
  • A Volume decouples designated data from the container and stores it in a specified location. Different volume types offer different capabilities; network-backed volumes can provide data sharing between containers as well as persistence.
  • Static volumes require the PV and PVC to be created manually before use, after which they are bound and mounted by the pod
  • Commonly used volume types:
  • Secret: an object holding a small amount of sensitive data such as passwords, tokens, or keys
  • ConfigMap: configuration data, decoupling configuration files from containers
  • emptyDir: a local, ephemeral volume for single-node storage; its data is lost when the pod directory is removed, so it suits caches and non-critical files
  • hostPath: a local volume for single-node storage; its data is kept when the pod is deleted, similar to bind-mounting a directory into a container
  • nfs and similar: network storage volumes
  • Examples
    emptyDir


#An emptyDir volume is created when the pod is assigned to a node and exists as long as the pod runs on that node. As the name suggests it starts out empty. The containers in the pod can read and write the same files in the emptyDir volume, even though the volume may be mounted at the same or different paths in each container. When the pod is removed from the node for any reason, the data in the emptyDir is deleted permanently.

#Example
[root@K8s-ansible case1-pod]#cat EmptyDir-test-case.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx 
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /cache
          name: cache-volume
      volumes:
      - name: cache-volume
        emptyDir: {}
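
The lifetime described above can be observed directly; a minimal sketch (it assumes the emptyDir example is the only deployment carrying the app=ng-deploy-80 label):

kubectl apply -f EmptyDir-test-case.yaml
POD=$(kubectl get pod -l app=ng-deploy-80 -o jsonpath='{.items[0].metadata.name}')
kubectl exec $POD -- sh -c 'echo test > /cache/test.txt && cat /cache/test.txt'
kubectl delete pod $POD   #the replacement pod created by the Deployment gets a brand-new, empty volume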

hostPath


#A hostPath volume mounts a file or directory from the host node's filesystem into the pod; when the pod is deleted, the volume and its data are not removed

#Example
[root@K8s-ansible case1-pod]#cat HostPath-test-case.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx 
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /cache
          name: cache-volume
      volumes:
      - name: cache-volume
        hostPath:
          path: /data/kubernetes
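
In contrast to emptyDir, data written through the hostPath mount survives pod deletion on that node; a minimal sketch:

kubectl apply -f HostPath-test-case.yaml
POD=$(kubectl get pod -l app=ng-deploy-80 -o jsonpath='{.items[0].metadata.name}')
kubectl exec $POD -- sh -c 'echo hostpath-test > /cache/host.txt'
kubectl delete pod $POD
#On the node that ran the pod, /data/kubernetes/host.txt is still present,
#but a new pod scheduled to a different node will not see it.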

NFS shared storage


#An nfs volume mounts an existing NFS (Network File System) share into the container. Unlike emptyDir, its contents are not lost: when the pod is deleted, the nfs volume is merely unmounted and its data is preserved. This means data can be uploaded to the NFS share ahead of time and used as soon as the pod starts, and the same share can be mounted read-write by multiple pods at once, i.e. the data can be shared between pods.

#Example
#Create pods that mount the same NFS export
#1. Prepare an NFS server and place a file on it
[root@K8s-haproxy01 ~]#apt install nfs-server nfs-common #nfs-common must also be installed on the worker nodes
Reading package lists... Done
Building dependency tree... Done
Reading state information... Done
Note, selecting 'nfs-kernel-server' instead of 'nfs-server'
nfs-kernel-server is already the newest version (1:2.6.1-1ubuntu1.2).
0 upgraded, 0 newly installed, 0 to remove and 53 not upgraded.
[root@K8s-haproxy01 ~]#mkdir -p /data/k8sdata 
[root@K8s-haproxy01 ~]#cd  /data/k8sdata 
[root@K8s-haproxy01 k8sdata]#rz -E
rz waiting to receive.
[root@K8s-haproxy01 k8sdata]#ls
微信图片_20221001115756.jpg
#2. Configure the exported share
[root@K8s-haproxy01 k8sdata]#vim /etc/exports 
[root@K8s-haproxy01 k8sdata]#cat /etc/exports 
# /etc/exports: the access control list for filesystems which may be exported
#       to NFS clients.  See exports(5).
#
# Example for NFSv2 and NFSv3:
# /srv/homes       hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
#
# Example for NFSv4:
# /srv/nfs4        gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes  gss/krb5i(rw,sync,no_subtree_check)
#

/data/k8sdata *(rw,no_root_squash)
[root@K8s-haproxy01 k8sdata]#systemctl restart nfs-server;systemctl enable nfs-server
#3. Verify that the export is visible from the clients
[root@K8s-ansible script]#showmount -e 192.168.11.203
Export list for 192.168.11.203:
/data/k8sdata *

#4. Create the resource objects and mount the NFS storage
[root@K8s-ansible case1-pod]#cat nfs-test-case1.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx 
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html/mysite
          name: my-nfs-volume
      volumes:
      - name: my-nfs-volume
        nfs:
          server: 192.168.11.203
          path: /data/k8sdata

---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30016
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80
    
[root@K8s-ansible case1-pod]#kubectl apply -f nfs-test-case1.yaml 
deployment.apps/nginx-deployment created
service/ng-deploy-80 created
[root@K8s-ansible case1-pod]#kubectl get pod -n default
NAME                                READY   STATUS    RESTARTS   AGE
nginx-deployment-56748595d8-7b78l   1/1     Running   0          7m15s
[root@K8s-ansible case1-pod]#kubectl describe pod nginx-deployment-56748595d8-7b78l -n default
Name:             nginx-deployment-56748595d8-7b78l
Namespace:        default
Priority:         0
Service Account:  default
Node:             192.168.11.216/192.168.11.216
Start Time:       Tue, 04 Apr 2023 13:25:59 +0000
Labels:           app=ng-deploy-80
                  pod-template-hash=56748595d8
Annotations:      <none>
Status:           Running
IP:               10.200.128.143
IPs:
  IP:           10.200.128.143
Controlled By:  ReplicaSet/nginx-deployment-56748595d8
Containers:
  ng-deploy-80:
    Container ID:   containerd://15dc4749de73cf728d08fbd5a9c5a7b803a0e0ce8d422d2dcb414026388773d5
    Image:          nginx
    Image ID:       docker.io/library/nginx@sha256:0d17b565c37bcbd895e9d92315a05c1c3c9a29f762b011a10c54a66cd53c9b31
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Tue, 04 Apr 2023 13:26:32 +0000
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /usr/share/nginx/html/mysite from my-nfs-volume (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-g4wl5 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  my-nfs-volume:
    Type:      NFS (an NFS mount that lasts the lifetime of a pod)
    Server:    192.168.11.203
    Path:      /data/k8sdata
    ReadOnly:  false
  kube-api-access-g4wl5:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age    From               Message
  ----    ------     ----   ----               -------
  Normal  Scheduled  9m9s   default-scheduler  Successfully assigned default/nginx-deployment-56748595d8-7b78l to 192.168.11.216
  Normal  Pulling    9m7s   kubelet            Pulling image "nginx"
  Normal  Pulled     8m37s  kubelet            Successfully pulled image "nginx" in 29.789300349s (29.7893475s including waiting)
  Normal  Created    8m37s  kubelet            Created container ng-deploy-80
  Normal  Started    8m36s  kubelet            Started container ng-deploy-80

#Add a haproxy listener that load-balances to the NodePort service exposing the NFS-backed site
[root@K8s-haproxy01 k8sdata]#cat /etc/haproxy/haproxy.cfg 
...

listen myserver-80
    bind 192.168.11.242:80
    mode tcp
    server K8s-master01 192.168.11.211:30016 check inter 3000 fall 2 rise 5
    server K8s-master02 192.168.11.212:30016 check inter 3000 fall 2 rise 5
    server K8s-master03 192.168.11.213:30016 check inter 3000 fall 2 rise 5

[root@K8s-haproxy01 k8sdata]#systemctl restart  haproxy

#Test whether the NFS-backed content is reachable through the load balancer
http://192.168.11.242/mysite/<file name>


#Example
#Create pods where each pod mounts multiple NFS exports
[root@K8s-ansible case1-pod]#cat nfs-test-case2.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment-site2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-81
  template:
    metadata:
      labels:
        app: ng-deploy-81
    spec:
      containers:
      - name: ng-deploy-81
        image: nginx 
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html/pool1
          name: my-nfs-volume-pool1
        - mountPath: /usr/share/nginx/html/pool2
          name: my-nfs-volume-pool2
      volumes:
      - name: my-nfs-volume-pool1
        nfs:
          server: 172.31.7.109
          path: /data/k8sdata/pool1
      - name: my-nfs-volume-pool2
        nfs:
          server: 172.31.7.109
          path: /data/k8sdata/pool2

---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-81
spec:
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30017
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-81
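
Before applying the manifest above, the two sub-directories have to exist under the exported path on the NFS server, and the server address in the manifest must point at your own NFS server (172.31.7.109 is the address used in the original course environment). A minimal sketch:

#On the NFS server
mkdir -p /data/k8sdata/pool1 /data/k8sdata/pool2
exportfs -r && showmount -e localhost   #re-export and confirm the share is visible
#On the deploy node
kubectl apply -f nfs-test-case2.yaml
kubectl get pods -l app=ng-deploy-81 -o wide
kubectl get svc ng-deploy-81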
  • PV/PVC


  • PV: PersistentVolume
  • A PV is a piece of network storage that has already been configured by the Kubernetes administrator. Storage is a cluster-level resource, so a PV does not belong to any namespace, and its data ultimately lives on the backing storage hardware. A pod cannot mount a PV directly: the PV has to be bound to a PVC, and the pod mounts that PVC. PVs support NFS, Ceph, commercial storage, cloud-provider storage and more, and let you define whether the volume is block or file storage, its capacity, its access modes, and so on. The lifecycle of a PV is independent of any pod, so deleting a pod that uses the PV does not affect the data stored in it.
  • PVC: PersistentVolumeClaim
  • A PVC is a pod's request for storage. The pod mounts the PVC and stores its data through it, and the PVC must be bound to a PV before it can be used. A PVC is created in a specific namespace, i.e. the pod and the PVC must run in the same namespace. A PVC can request a specific capacity and access mode, and deleting the pod that uses the PVC does not affect the data in it.
  • PV/PVC decouple pods from storage, so the storage backend can be changed without modifying the pods.
  • Compared with mounting NFS directly, the PV/PVC layer allows space allocation on the storage server and access-permission management to be handled in Kubernetes.
  • Volume types
  • static: static volumes; the PV has to be created manually before use, then a PVC is created and bound to the PV, and finally mounted by the pod. Suitable when PVs and PVCs are relatively fixed.
  • dynamic: dynamic volumes; a StorageClass is created first, and pods can then have PVCs provisioned dynamically through that class. Suitable for stateful clusters such as a MySQL primary with replicas or a ZooKeeper ensemble.
#PV/PVC do not persist data by themselves; they are logical storage-management objects in Kubernetes and essentially rely on an external storage service
#Access modes supported by the different volume types
https://v1-22.docs.kubernetes.io/zh/docs/concepts/storage/persistent-volumes/

#Common PV parameters
Capacity: #size of the PV, kubectl explain PersistentVolume.spec.capacity
accessModes: #access modes, kubectl explain PersistentVolume.spec.accessModes
ReadWriteOnce – the PV can be mounted read-write by a single node, RWO
ReadOnlyMany – the PV can be mounted by many nodes, but read-only, ROX
ReadWriteMany – the PV can be mounted read-write by many nodes, RWX
persistentVolumeReclaimPolicy #reclaim policy, i.e. what happens to an existing volume when it is released:
#kubectl explain PersistentVolume.spec.persistentVolumeReclaimPolicy
Retain – keep the PV unchanged after release; an administrator must delete it manually
Recycle – reclaim the space, i.e. delete all data on the volume (including directories and hidden files); currently only NFS and hostPath support this
Delete – delete the volume automatically
volumeMode #volume mode, kubectl explain PersistentVolume.spec.volumeMode
defines whether the volume is used as a raw block device or with a filesystem; the default is a filesystem
mountOptions #list of extra mount options for finer-grained control

#Common PVC parameters
accessModes: #PVC access modes, kubectl explain PersistentVolumeClaim.spec.accessModes
 ReadWriteOnce – the PVC can be mounted read-write by a single node, RWO
 ReadOnlyMany – the PVC can be mounted by many nodes, but read-only, ROX
 ReadWriteMany – the PVC can be mounted read-write by many nodes, RWX
resources: #size of the volume the PVC requests
selector: #label selector used to pick the PV to bind to
 matchLabels #match by label
 matchExpressions #match by expressions
volumeName #name of the PV to bind to
volumeMode #volume mode
 defines whether the PVC is consumed as a raw block device or with a filesystem; the default is a filesystem

------------------------------------ Static storage example -------------------------------------

#PV example
[root@K8s-ansible case1-pod]#cat pv-test-case.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: myserver-myapp-static-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/myserver/myappdata
    server: 172.31.7.109

#PVC example
[root@K8s-ansible case1-pod]#cat pvc-test-case.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: myserver-myapp-static-pvc
  namespace: myserver
spec:
  volumeName: myserver-myapp-static-pv
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi

#Example of a Deployment mounting the PVC
[root@K8s-ansible case1-pod]#cat pvcuse-test-case.yaml 
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: myserver-myapp 
  name: myserver-myapp-deployment-name
  namespace: myserver
spec:
  replicas: 1 
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      containers:
        - name: myserver-myapp-container
          image: nginx:1.20.0 
          #imagePullPolicy: Always
          volumeMounts:
          - mountPath: "/usr/share/nginx/html/statics"
            name: statics-datadir
      volumes:
        - name: statics-datadir
          persistentVolumeClaim:
            claimName: myserver-myapp-static-pvc 

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: myserver-myapp-service
  name: myserver-myapp-service-name
  namespace: myserver
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30080
  selector:
    app: myserver-myapp-frontend
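
A sketch for checking that the PV and PVC bind and that the pod mounts the claim; the NFS path referenced by the PV has to exist on the NFS server, and the server address (172.31.7.109 in the example) must match your environment:

#On the NFS server
mkdir -p /data/k8sdata/myserver/myappdata
#On the deploy node
kubectl create namespace myserver
kubectl apply -f pv-test-case.yaml -f pvc-test-case.yaml
kubectl get pv myserver-myapp-static-pv               #STATUS should move from Available to Bound
kubectl get pvc myserver-myapp-static-pvc -n myserver
kubectl apply -f pvcuse-test-case.yaml
kubectl get pods,svc -n myserver -o wide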

------------------------------------ Dynamic storage example -------------------------------------
#See the next chapter: Kubernetes Resource Object Management and Examples II

I am moore. Let's keep pushing forward together!
