将动态创建的PV挂载到同一个pod上的多个容器

问题描述 投票:0回答:1

我正在研究一个用例,我需要在 jupyterhub pod 中添加一个新容器,这个新容器(sidecontainer)监视 jupyterhub 目录。

Jupyterhub 容器在启动时创建动态 PV,请参阅下面的部分

# Default `singleuser` section of the zero-to-jupyterhub Helm chart's
# values.yaml, shown here for reference (see link below for the full file).
singleuser:
  podNameTemplate:
  extraTolerations: []
  nodeSelector: {}
  extraNodeAffinity:
    required: []
    preferred: []
  extraPodAffinity:
    required: []
    preferred: []
  extraPodAntiAffinity:
    required: []
    preferred: []
  networkTools:
    image:
      name: jupyterhub/k8s-network-tools
      tag: "set-by-chartpress"
      pullPolicy:
      pullSecrets: []
    resources: {}
  cloudMetadata:
    # block set to true will append a privileged initContainer using the
    # iptables to block the sensitive metadata server at the provided ip.
    blockWithIptables: true
    ip: 169.254.169.254
  networkPolicy:
    enabled: true
    ingress: []
    egress:
      # Required egress to communicate with the hub and DNS servers will be
      # augmented to these egress rules.
      #
      # This default rule explicitly allows all outbound traffic from singleuser
      # pods, except to a typical IP used to return metadata that can be used by
      # someone with malicious intent.
      - to:
          - ipBlock:
              cidr: 0.0.0.0/0
              except:
                - 169.254.169.254/32
    interNamespaceAccessLabels: ignore
    allowedIngressPorts: []
  events: true
  extraAnnotations: {}
  extraLabels:
    hub.jupyter.org/network-access-hub: "true"
  extraFiles: {}
  extraEnv: {}
  lifecycleHooks: {}
  initContainers: []
  # Sidecar containers added to the user pod go here (empty by default).
  extraContainers: []
  uid: 1000
  fsGid: 100
  serviceAccountName:
  storage:
    # type "dynamic" makes the spawner create a per-user PVC from the
    # `dynamic` templates below; the resulting volume is mounted at
    # homeMountPath in the notebook container.
    type: dynamic
    extraLabels: {}
    extraVolumes: []
    extraVolumeMounts: []
    static:
      pvcName:
      subPath: "{username}"
    capacity: 10Gi
    homeMountPath: /home/jovyan
    dynamic:
      storageClass:
      # Templates used to name the per-user PVC and the pod volume that
      # references it; {username} and {servername} are expanded per user.
      pvcNameTemplate: claim-{username}{servername}
      volumeNameTemplate: volume-{username}{servername}
      storageAccessModes: [ReadWriteOnce]
  image:
    name: jupyterhub/k8s-singleuser-sample
    tag: "set-by-chartpress"
    pullPolicy:
    pullSecrets: []
  startTimeout: 300
  cpu:
    limit:
    guarantee:
  memory:
    limit:
    guarantee: 1G
  extraResource:
    limits: {}
    guarantees: {}
  cmd:
  defaultUrl:
  extraPodConfig: {}
  profileList: []

我已在部署文件的 extraContainers 部分中添加了新容器,我的容器确实启动了,但动态 PV 并没有挂载到该容器上。

我想确认这个用例在 Kubernetes 层面是否在技术上可行,以及应该如何实现。

此处提供完整的 yaml 文件以供参考 https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/main/jupyterhub/values.yaml

配置图供参考

  # NOTE(review): as pasted, `singleuser:` has no indented children — every
  # key below sits at the same indent level, so YAML parses `singleuser` as
  # null and the following keys as its siblings. In the real config these
  # keys are presumably nested one level under `singleuser:`; the nesting
  # looks lost in the paste — verify against the deployed ConfigMap.
  singleuser:
  baseUrl: /
  cloudMetadata:
    enabled: true
    # redacted metadata-server IP (typically 169.254.169.254)
    ip: xx.xx.xx.xx
  cpu: {}
  events: true
  extraAnnotations: {}
  extraConfigFiles:
    # disabled below; both entries are empty placeholders
    config_files:
      - cm_key: ''
        content: ''
        file_path: ''
      - cm_key: ''
        content: ''
        file_path: ''
    enabled: false
  # Sidecar containers added to the user pod. Pod volumes — including the
  # dynamically provisioned home PV — are only visible inside a sidecar if
  # that sidecar declares its own volumeMounts entry for them; this is the
  # reason the dynamic PV was not showing up in the dind container.
  extraContainers:
    - image: 'docker:19.03-rc-dind'
      lifecycle:
        postStart:
          exec:
            command:
              - sh
              - '-c'
              - update-ca-certificates; echo Certificates Updated
      name: dind
      securityContext:
        privileged: true
      volumeMounts:
        - mountPath: /var/lib/docker
          name: dind-storage
        - mountPath: /usr/local/share/ca-certificates/
          name: docker-cert
        # Mount the dynamically provisioned home volume so this sidecar can
        # watch the notebook's home directory. The name must match the pod
        # volume created from storage.dynamic.volumeNameTemplate; KubeSpawner
        # expands {username}/{servername} templates in extra_containers —
        # NOTE(review): confirm template expansion on your chart version.
        - mountPath: '/home/{username}'
          name: 'volume-{username}{servername}'
  # Environment variables injected into the notebook container. Values must
  # be strings: an intentionally-empty value is written as '' — a bare value
  # (as JAVA_HOME had) parses as YAML null, which is inconsistent with the
  # sibling entries and may be dropped or rejected downstream.
  extraEnv:
    ACTUAL_HADOOP_CONF_DIR: ''
    ACTUAL_HIVE_CONF_DIR: ''
    ACTUAL_SPARK_CONF_DIR: ''
    CDH_PARCEL_DIR: ''
    DOCKER_HOST: ''
    JAVA_HOME: ''
    LIVY_URL: ''
    SPARK2_PARCEL_DIR: ''
  extraLabels:
    hub.jupyter.org/network-access-hub: 'true'
  extraNodeAffinity:
    preferred: []
    required: []
  extraPodAffinity:
    preferred: []
    required: []
  extraPodAntiAffinity:
    preferred: []
    required: []
  extraPodConfig: {}
  extraResource:
    guarantees: {}
    limits: {}
  extraTolerations: []
  fsGid: 0
  image:
    # NOTE(review): the registry host appears redacted — the name starts with
    # a bare '/'; verify the full image reference in the live config.
    name: >-
      /jupyterhub/jpt-spark-magic
    pullPolicy: IfNotPresent
    tag: xxx
  imagePullSecret:
    email: null
    enabled: false
    registry: null
    username: null
  initContainers: []
  lifecycleHooks: {}
  memory:
    guarantee: 1G
  networkPolicy:
    # enabled is false below, so this egress rule is currently inert.
    egress:
      - to:
          - ipBlock:
              cidr: 0.0.0.0/0
              except:
                - 169.254.169.254/32
    enabled: false
    ingress: []
  networkTools:
    image:
      name: >-
        /k8s-hub-multispawn
      pullPolicy: IfNotPresent
      tag: '12345'
  nodeSelector: {}
  # User-selectable spawner profiles; kubespawner_override replaces spawner
  # settings (cmd, image, cpu/mem limits, environment) for the chosen profile.
  # NOTE(review): each `display_name` below is an unquoted number and parses
  # as a YAML int (0, 1, 2, 3); quote it (e.g. '0') if the consumer expects
  # a string — verify against the chart's schema.
  profileList:
    - description: Python for data enthusiasts
      display_name: 0
      kubespawner_override:
        cmd:
          - jpt-entry-cmd.sh
        cpu_limit: 1
        environment:
          XYZ_SERVICE_URL: 'http://XYZ:8080'
          CURL_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
          DOCKER_HOST: 'tcp://localhost:2375'
          HADOOP_CONF_DIR: /etc/hadoop/conf
          HADOOP_HOME: /usr/hdp/3.1.5.6091-7/hadoop/
          HDP_DIR: /usr/hdp/3.1.5.6091-7
          HDP_HOME_DIR: /usr/hdp/3.1.5.6091-7
          HDP_VERSION: 3.1.5.6091-7
          HIVE_CONF_DIR: /usr/hdp/3.1.5.6091-7/hive
          HIVE_HOME: /usr/hdp/3.1.5.6091-7/hive
          INTEGRATION_ENV: HDP3
          JAVA_HOME: /usr/jdk64/jdk1.8.0_112
          LD_LIBRARY_PATH: >-
            /usr/hdp/3.1.5.6091-7/hadoop/lib/native:/usr/jdk64/jdk1.8.0_112/jre:/usr/hdp/3.1.5.6091-7/usr/lib/:/usr/hdp/3.1.5.6091-7/usr/lib/
          LIVY_URL: 'http://ammaster01.fake.org:8999'
          MLFLOW_TRACKING_URI: 'http://mlflow:5100'
          NO_PROXY: mlflow
          SPARK_CONF_DIR: /etc/spark2/conf
          SPARK_HOME: /usr/hdp/3.1.5.6091-7/spark2
          SPARK2_PARCEL_DIR: /usr/hdp/3.1.5.6091-7/spark2
          TOOLS_BASE_PATH: /usr/local/bin
        image: >-
          /jupyterhub/jpt-spark-magic:1.1.2
        mem_limit: 4096M
        uid: 0
    - description: R for data enthusiasts
      display_name: 1
      kubespawner_override:
        cmd:
          - start-all.sh
        environment:
          XYZ_SERVICE_URL: 'http://XYZ-service:8080'
          DISABLE_AUTH: 'true'
          XYZ: /home/rstudio/kitematic
        image: '/jupyterhub/rstudio:364094'
        uid: 0
    # The two profiles below repeat profile 0 with larger cpu/mem limits and
    # different Livy endpoints.
    - description: Python for data enthusiasts test2
      display_name: 2
      kubespawner_override:
        cmd:
          - jpt-entry-cmd.sh
        cpu_limit: 4
        environment:
          XYZ_SERVICE_URL: 'http://XYZ-service:8080'
          CURL_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
          DOCKER_HOST: 'tcp://localhost:2375'
          HADOOP_CONF_DIR: /etc/hadoop/conf
          HADOOP_HOME: /usr/hdp/3.1.5.6091-7/hadoop/
          HDP_DIR: /usr/hdp/3.1.5.6091-7
          HDP_HOME_DIR: /usr/hdp/3.1.5.6091-7
          HDP_VERSION: 3.1.5.6091-7
          HIVE_CONF_DIR: /usr/hdp/3.1.5.6091-7/hive
          HIVE_HOME: /usr/hdp/3.1.5.6091-7/hive
          INTEGRATION_ENV: HDP3
          JAVA_HOME: /usr/jdk64/jdk1.8.0_112
          LD_LIBRARY_PATH: >-
            /usr/hdp/3.1.5.6091-7/hadoop/lib/native:/usr/jdk64/jdk1.8.0_112/jre:/usr/hdp/3.1.5.6091-7/usr/lib/:/usr/hdp/3.1.5.6091-7/usr/lib/
          LIVY_URL: 'http://xyz:8999'
          MLFLOW_TRACKING_URI: 'http://mlflow:5100'
          NO_PROXY: mlflow
          SPARK_CONF_DIR: /etc/spark2/conf
          SPARK_HOME: /usr/hdp/3.1.5.6091-7/spark2
          SPARK2_PARCEL_DIR: /usr/hdp/3.1.5.6091-7/spark2
          TOOLS_BASE_PATH: /usr/local/bin
        image: >-
          /jupyterhub/jpt-spark-magic:1.1.2
        mem_limit: 8192M
        uid: 0
    - description: Python for data enthusiasts test3
      display_name: 3
      kubespawner_override:
        cmd:
          - jpt-entry-cmd.sh
        cpu_limit: 8
        environment:
          XYZ_SERVICE_URL: 'http://XYZ-service:8080'
          CURL_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
          DOCKER_HOST: 'tcp://localhost:2375'
          HADOOP_CONF_DIR: /etc/hadoop/conf
          HADOOP_HOME: /usr/hdp/3.1.5.6091-7/hadoop/
          HDP_DIR: /usr/hdp/3.1.5.6091-7
          HDP_HOME_DIR: /usr/hdp/3.1.5.6091-7
          HDP_VERSION: 3.1.5.6091-7
          HIVE_CONF_DIR: /usr/hdp/3.1.5.6091-7/hive
          HIVE_HOME: /usr/hdp/3.1.5.6091-7/hive
          INTEGRATION_ENV: HDP3
          JAVA_HOME: /usr/jdk64/jdk1.8.0_112
          LD_LIBRARY_PATH: >-
            /usr/hdp/3.1.5.6091-7/hadoop/lib/native:/usr/jdk64/jdk1.8.0_112/jre:/usr/hdp/3.1.5.6091-7/usr/lib/:/usr/hdp/3.1.5.6091-7/usr/lib/
          LIVY_URL: 'http://fake.org:8999'
          MLFLOW_TRACKING_URI: 'http://mlflow:5100'
          NO_PROXY: mlflow
          SPARK_CONF_DIR: /etc/spark2/conf
          SPARK_HOME: /usr/hdp/3.1.5.6091-7/spark2
          SPARK2_PARCEL_DIR: /usr/hdp/3.1.5.6091-7/spark2
          TOOLS_BASE_PATH: /usr/local/bin
        image: >-
          /jupyterhub/jpt-spark-magic:1.1.2
        mem_limit: 16384M
        uid: 0
  startTimeout: 300
  storage:
    capacity: 10Gi
    dynamic:
      # Templates for the per-user PVC and the pod volume that references it;
      # {username}/{servername} are expanded per spawned server.
      pvcNameTemplate: 'claim-{username}{servername}'
      storageAccessModes:
        - ReadWriteOnce
      storageClass: nfs-client
      volumeNameTemplate: 'volume-{username}{servername}'
    extraLabels: {}
    # NOTE(review): these extraVolumeMounts are applied to the notebook
    # container; per the behavior described in the question above, sidecars
    # under extraContainers do not inherit them and must declare their own
    # volumeMounts to see any pod volume (including the dynamic home PV).
    extraVolumeMounts:
      - mountPath: /etc/krb5.conf
        name: krb
        readOnly: true
      - mountPath: /usr/jdk64/jdk1.8.0_112
        name: java-home
        readOnly: true
      - mountPath: /xyz/conda/envs
        name: xyz-conda-envs
        readOnly: false
      # The four mounts below all slice the shared `bigdata` PVC via subPath.
      - mountPath: /usr/hdp/
        name: bigdata
        readOnly: true
        subPath: usr-hdp
      - mountPath: /etc/hadoop/
        name: bigdata
        readOnly: true
        subPath: HDP
      - mountPath: /etc/hive/
        name: bigdata
        readOnly: true
        subPath: hdp-hive
      - mountPath: /etc/spark2/
        name: bigdata
        readOnly: true
        subPath: hdp-spark2
    extraVolumes:
      - emptyDir: {}
        name: dind-storage
      - name: docker-cert
        secret:
          secretName: docker-cert
      - hostPath:
          path: /var/lib/ut_xyz_ts/jdk1.8.0_112
          type: Directory
        name: java-home
      - hostPath:
          path: /xyz/conda/envs
          type: Directory
        name: xyz-conda-envs
      - hostPath:
          path: /etc/krb5.conf
          type: File
        name: krb
      - name: bigdata
        persistentVolumeClaim:
          claimName: bigdata
    homeMountPath: '/home/{username}'
    static:
      subPath: '{username}'
    type: dynamic
  uid: 0

提前致谢..

kubernetes persistent-volumes jupyterhub
1个回答
0
投票

对于您的问题——从技术上讲,同一 Pod 的两个或多个容器能否共享同一个卷——答案是 Yes。请参阅此处 - https://youtu.be/GQJP9QdHHs8?t=82 。

但是您还需要在额外容器(extraContainers)的规范中定义一个 volumeMount(也请参阅视频中的示例)。如果您可以检查这一点,或者分享 `kubectl describe deployment <your-deployment>` 的输出,我可以确认。

© www.soinside.com 2019 - 2024. All rights reserved.