From 01b42116796628d444d3c5d8f08c71cf4b132949 Mon Sep 17 00:00:00 2001 From: Georgiana Dolocan Date: Tue, 26 Mar 2024 12:29:59 +0200 Subject: [PATCH] Only deploy profilelist to staging first --- config/clusters/awi-ciroh/common.values.yaml | 77 ------------------- config/clusters/awi-ciroh/prod.values.yaml | 8 ++ config/clusters/awi-ciroh/staging.values.yaml | 76 ++++++++++++++++++ 3 files changed, 84 insertions(+), 77 deletions(-) diff --git a/config/clusters/awi-ciroh/common.values.yaml b/config/clusters/awi-ciroh/common.values.yaml index ef3db392df..6ccc2cc18a 100644 --- a/config/clusters/awi-ciroh/common.values.yaml +++ b/config/clusters/awi-ciroh/common.values.yaml @@ -45,83 +45,6 @@ basehub: - jameshalgren - arpita0911patel - karnesh - singleuser: - profileList: - # The mem-guarantees are here so k8s doesn't schedule other pods - # on these nodes. They need to be just under total allocatable - # RAM on a node, not total node capacity. Values calculated using - # https://learnk8s.io/kubernetes-instance-calculator - # - # FIXME: These are changed to a temporary node sharing setup based on - # the legacy choices to help us pre-warm capacity independent on - # the choices users end up making and avoiding running into - # persistent disk quotas. - # - # Change PR: https://github.com/2i2c-org/infrastructure/pull/2539 - # Related event: https://github.com/2i2c-org/infrastructure/issues/2520 - # - # This is an interim setup, trying to balance the experience of - # the previous 1:1 user:node setup with a node sharing setup. It - # is not meant to be retained long term! - # - # -[ ] Make this cluster have a node sharing setup like in the - # basehub/daskhub template. 
- # - - display_name: "Small" - description: 5GB RAM, 2 CPUs - default: true - profile_options: &profile_options - image: - display_name: Image - choices: - old: - display_name: Original Pangeo Notebook base image 2023.01.13 - slug: "tensorflow" - kubespawner_override: - image: "quay.io/2i2c/awi-ciroh-image:f7222fce8b16 " - new: - display_name: New Pangeo Notebook base image 2023.09.11 - default: true - slug: "pytorch" - kubespawner_override: - image: "quay.io/2i2c/awi-ciroh-image:88ea9a74a66e" - kubespawner_override: - mem_limit: 7G - mem_guarantee: 5G - cpu_limit: 2 - cpu_guarantee: 0.938 - node_selector: - node.kubernetes.io/instance-type: n2-highmem-16 - - display_name: Medium - description: 11GB RAM, 4 CPUs - profile_options: *profile_options - kubespawner_override: - mem_limit: 15G - mem_guarantee: 11G - cpu_limit: 4 - cpu_guarantee: 1.875 - node_selector: - node.kubernetes.io/instance-type: n2-highmem-16 - - display_name: Large - description: 24GB RAM, 8 CPUs - profile_options: *profile_options - kubespawner_override: - mem_limit: 30G - mem_guarantee: 24G - cpu_limit: 8 - cpu_guarantee: 3.75 - node_selector: - node.kubernetes.io/instance-type: n2-highmem-16 - - display_name: Huge - description: 52GB RAM, 16 CPUs - profile_options: *profile_options - kubespawner_override: - mem_limit: 60G - mem_guarantee: 52G - cpu_limit: 16 - cpu_guarantee: 7.5 - node_selector: - node.kubernetes.io/instance-type: n2-highmem-16 dask-gateway: gateway: backend: diff --git a/config/clusters/awi-ciroh/prod.values.yaml b/config/clusters/awi-ciroh/prod.values.yaml index d0b5d8d724..84a850e2e8 100644 --- a/config/clusters/awi-ciroh/prod.values.yaml +++ b/config/clusters/awi-ciroh/prod.values.yaml @@ -9,6 +9,14 @@ basehub: - hosts: [ciroh.awi.2i2c.cloud] secretName: https-auto-tls singleuser: + image: + # Image build repo: https://github.com/2i2c-org/awi-ciroh-image + # + # NOTE: The configurator is used in this cluster, so this is stale + # configuration. 
+ # + name: "quay.io/2i2c/awi-ciroh-image" + tag: "584293e50d4c" extraEnv: SCRATCH_BUCKET: gs://awi-ciroh-scratch/$(JUPYTERHUB_USER) PANGEO_SCRATCH: gs://awi-ciroh-scratch/$(JUPYTERHUB_USER) diff --git a/config/clusters/awi-ciroh/staging.values.yaml b/config/clusters/awi-ciroh/staging.values.yaml index 16f288d698..55020c3fbe 100644 --- a/config/clusters/awi-ciroh/staging.values.yaml +++ b/config/clusters/awi-ciroh/staging.values.yaml @@ -9,6 +9,82 @@ basehub: - hosts: [staging.ciroh.awi.2i2c.cloud] secretName: https-auto-tls singleuser: + profileList: + # The mem-guarantees are here so k8s doesn't schedule other pods + # on these nodes. They need to be just under total allocatable + # RAM on a node, not total node capacity. Values calculated using + # https://learnk8s.io/kubernetes-instance-calculator + # + # FIXME: These are changed to a temporary node sharing setup based on + # the legacy choices to help us pre-warm capacity independent on + # the choices users end up making and avoiding running into + # persistent disk quotas. + # + # Change PR: https://github.com/2i2c-org/infrastructure/pull/2539 + # Related event: https://github.com/2i2c-org/infrastructure/issues/2520 + # + # This is an interim setup, trying to balance the experience of + # the previous 1:1 user:node setup with a node sharing setup. It + # is not meant to be retained long term! + # + # -[ ] Make this cluster have a node sharing setup like in the + # basehub/daskhub template. 
+ # + - display_name: "Small" + description: 5GB RAM, 2 CPUs + default: true + profile_options: &profile_options + image: + display_name: Image + choices: + old: + display_name: Original Pangeo Notebook base image 2023.01.13 + slug: "tensorflow" + kubespawner_override: + image: "quay.io/2i2c/awi-ciroh-image:f7222fce8b16" + new: + display_name: New Pangeo Notebook base image 2023.09.11 + default: true + slug: "pytorch" + kubespawner_override: + image: "quay.io/2i2c/awi-ciroh-image:88ea9a74a66e" + kubespawner_override: + mem_limit: 7G + mem_guarantee: 5G + cpu_limit: 2 + cpu_guarantee: 0.938 + node_selector: + node.kubernetes.io/instance-type: n2-highmem-16 + - display_name: Medium + description: 11GB RAM, 4 CPUs + profile_options: *profile_options + kubespawner_override: + mem_limit: 15G + mem_guarantee: 11G + cpu_limit: 4 + cpu_guarantee: 1.875 + node_selector: + node.kubernetes.io/instance-type: n2-highmem-16 + - display_name: Large + description: 24GB RAM, 8 CPUs + profile_options: *profile_options + kubespawner_override: + mem_limit: 30G + mem_guarantee: 24G + cpu_limit: 8 + cpu_guarantee: 3.75 + node_selector: + node.kubernetes.io/instance-type: n2-highmem-16 + - display_name: Huge + description: 52GB RAM, 16 CPUs + profile_options: *profile_options + kubespawner_override: + mem_limit: 60G + mem_guarantee: 52G + cpu_limit: 16 + cpu_guarantee: 7.5 + node_selector: + node.kubernetes.io/instance-type: n2-highmem-16 extraEnv: SCRATCH_BUCKET: gs://awi-ciroh-scratch-staging/$(JUPYTERHUB_USER) PANGEO_SCRATCH: gs://awi-ciroh-scratch-staging/$(JUPYTERHUB_USER)