diff --git a/applications/jupyterhub/Dockerfile b/applications/jupyterhub/Dockerfile index 8b279adc..907ce672 100755 --- a/applications/jupyterhub/Dockerfile +++ b/applications/jupyterhub/Dockerfile @@ -1,31 +1,39 @@ ARG CLOUDHARNESS_BASE FROM $CLOUDHARNESS_BASE as base -FROM jupyterhub/k8s-hub:1.1.3 +FROM quay.io/jupyterhub/k8s-hub:3.2.1 USER root COPY --from=base libraries/models/requirements.txt /libraries/models/requirements.txt -RUN pip install -r /libraries/models/requirements.txt +RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\ + pip install -r /libraries/models/requirements.txt COPY --from=base libraries/cloudharness-common/requirements.txt /libraries/cloudharness-common/requirements.txt -RUN pip install -r /libraries/cloudharness-common/requirements.txt +RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\ + pip install -r /libraries/cloudharness-common/requirements.txt COPY --from=base libraries/client/cloudharness_cli/requirements.txt /libraries/client/cloudharness_cli/requirements.txt -RUN pip install -r /libraries/client/cloudharness_cli/requirements.txt +RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\ + pip install -r /libraries/client/cloudharness_cli/requirements.txt COPY --from=base libraries/models /libraries/models -RUN pip install -e /libraries/models +RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\ + pip install -e /libraries/models COPY --from=base libraries/cloudharness-common /libraries/cloudharness-common COPY --from=base libraries/client/cloudharness_cli /libraries/client/cloudharness_cli # -RUN pip install -e /libraries/cloudharness-common -RUN pip install -e /libraries/client/cloudharness_cli +RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\ + pip install -e /libraries/cloudharness-common +RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\ + pip install -e /libraries/client/cloudharness_cli COPY src src -RUN pip install ./src/harness_jupyter -RUN pip install ./src/chauthenticator +RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\ + pip install ./src/harness_jupyter +RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\ + pip install ./src/chauthenticator USER jovyan diff --git a/applications/jupyterhub/README.md b/applications/jupyterhub/README.md index d7d67d4d..9ad78d2f 100755 --- a/applications/jupyterhub/README.md +++ b/applications/jupyterhub/README.md @@ -37,7 +37,8 @@ TODO: remember to implement/revise this code after you have updated/changed the The helm chart is based on the [zero-to-jupyterhub](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/) helm chart. 1. Run update.sh [TAG] # Do not use latest! -2. Restore from the diff files with EDIT: CLOUDHARNESS +2. Restore from the diff files with EDIT: CLOUDHARNESS. Use update.patch as a reference +3. 
Update Dockerfile to use the same base image you see on values.yaml: hub/image Customize notebook image: quay.io/jupyterhub/k8s-singleuser-sample:[TAG] diff --git a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py index 8ec801ee..5ebe20b5 100755 --- a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py +++ b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py @@ -537,6 +537,7 @@ def camelCaseify(s): c.Authenticator.auto_login = True c.OAuthenticator.client_id = client_id c.OAuthenticator.client_secret = client_secret + c.OAuthenticator.allow_all = True c.GenericOAuthenticator.login_service = "CH" c.GenericOAuthenticator.username_key = "email" diff --git a/applications/jupyterhub/deploy/resources/hub/z2jh.py b/applications/jupyterhub/deploy/resources/hub/z2jh.py index fc368f64..2fe0d25b 100755 --- a/applications/jupyterhub/deploy/resources/hub/z2jh.py +++ b/applications/jupyterhub/deploy/resources/hub/z2jh.py @@ -119,6 +119,7 @@ def get_config(key, default=None): value = value[level] # EDIT: CLOUDHARNESS START + import re if value and isinstance(value, str): replace_var = re.search("{{.*?}}", value) if replace_var: diff --git a/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl b/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl index 3159d103..e9d2b4f4 100644 --- a/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl +++ b/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl @@ -178,7 +178,7 @@ ldap.dn.user.useLookupName: LDAPAuthenticator.use_lookup_dn_username representing the old z2jh config, output the result in $c. */}} - {{- include "jupyterhub.authDep.remapOldToNew.mappable" (list $c .Values.apps.jupyterhub.apps.jupyterhub.global.safeToSho.Values.apps.jupyterhub. }} + {{- include "jupyterhub.authDep.remapOldToNew.mappable" (list $c .Values.apps.jupyterhub.apps.jupyterhub.global.safeToSho.Values.apps.jupyterhub) }} {{- $class_old_config_key := .Values.apps.jupyterhub.apps.jupyterhub.auth.type | default "" }} {{- /* ldap - github */}} {{- $class_new_entrypoint := "" }} {{- /* ldapauthenticator.LDAPAuthenticator - github */}} @@ -191,7 +191,7 @@ ldap.dn.user.useLookupName: LDAPAuthenticator.use_lookup_dn_username {{- /* UPDATE c dict explicitly with auth.custom.config */}} {{- if .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.config }} {{- $custom_config := merge (dict) .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.config }} - {{- if not .Values.apps.jupyterhub.apps.jupyterhub.global.safeToSho.Values.apps.jupyterhub.}} + {{- if not .Values.apps.jupyterhub.apps.jupyterhub.global.safeToSho.Values.apps.jupyterhub }} {{- range $key, $val := $custom_config }} {{- $_ := set $custom_config $key "***" }} {{- end }} diff --git a/applications/jupyterhub/deploy/templates/_helpers.tpl b/applications/jupyterhub/deploy/templates/_helpers.tpl index a2023639..1737f3d6 100755 --- a/applications/jupyterhub/deploy/templates/_helpers.tpl +++ b/applications/jupyterhub/deploy/templates/_helpers.tpl @@ -194,7 +194,7 @@ component: {{ include "jupyterhub.componentLabel" . }} using "toYaml | fromYaml" in order to be able to use normal helm template functions on it. 
*/}} - {{- $jupyterhub_values := .root.Values.apps.jupyterhub.}} + {{- $jupyterhub_values := .root.Values.apps.jupyterhub }} {{- if ne .root.Chart.Name "jupyterhub" }} {{- if .root.Values.apps.jupyterhub.jupyterhub }} {{- $jupyterhub_values = .root.Values.apps.jupyterhub.jupyterhub }} diff --git a/applications/jupyterhub/update.patch b/applications/jupyterhub/update.patch new file mode 100644 index 00000000..5241525b --- /dev/null +++ b/applications/jupyterhub/update.patch @@ -0,0 +1,5845 @@ +diff --git a/applications/jupyterhub/README.md b/applications/jupyterhub/README.md +index d961d03..d7d67d4 100755 +--- a/applications/jupyterhub/README.md ++++ b/applications/jupyterhub/README.md +@@ -31,3 +31,13 @@ To support the pre pulling of task images see (https://github.com/MetaCell/cloud + the template `templates/image-puller/_helpers-daemonset.tpl` has been changed (see line 167 and on) + + TODO: remember to implement/revise this code after you have updated/changed the templates of JupyterHub ++ ++## How to update ++ ++The helm chart is based on the [zero-to-jupyterhub](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/) helm chart. ++ ++1. Run update.sh [TAG] # Do not use latest! ++2. Restore from the diff files with EDIT: CLOUDHARNESS ++ ++Customize notebook image: quay.io/jupyterhub/k8s-singleuser-sample:[TAG] ++ +diff --git a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py +index d4b3cee..8ec801e 100755 +--- a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py ++++ b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py +@@ -1,9 +1,17 @@ ++# load the config object (satisfies linters) ++c = get_config() # noqa ++ ++import glob + import os + import re + import sys +-import logging + ++from jupyterhub.utils import url_path_join ++from kubernetes_asyncio import client + from tornado.httpclient import AsyncHTTPClient ++ ++#CLOUDHARNESS: EDIT START ++import logging + from kubernetes import client + from jupyterhub.utils import url_path_join + +@@ -12,7 +20,7 @@ try: + harness_hub() # activates harness hooks on jupyterhub + except Exception as e: + logging.error("could not import harness_jupyter", exc_info=True) +- ++# CLOUDHARNESS: EDIT END + + # Make sure that modules placed in the same directory as the jupyterhub config are added to the pythonpath + configuration_directory = os.path.dirname(os.path.realpath(__file__)) +@@ -20,39 +28,13 @@ sys.path.insert(0, configuration_directory) + + from z2jh import ( + get_config, +- set_config_if_not_none, + get_name, + get_name_env, + get_secret_value, ++ set_config_if_not_none, + ) + + +-print('Base url is', c.JupyterHub.get('base_url', '/')) +- +-# Configure JupyterHub to use the curl backend for making HTTP requests, +-# rather than the pure-python implementations. The default one starts +-# being too slow to make a large number of requests to the proxy API +-# at the rate required. 
+-AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") +- +-c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner' +- +-# Connect to a proxy running in a different pod +-c.ConfigurableHTTPProxy.api_url = 'http://{}:{}'.format(os.environ['PROXY_API_SERVICE_HOST'], int(os.environ['PROXY_API_SERVICE_PORT'])) +-c.ConfigurableHTTPProxy.should_start = False +- +-# Do not shut down user pods when hub is restarted +-c.JupyterHub.cleanup_servers = False +- +-# Check that the proxy has routes appropriately setup +-c.JupyterHub.last_activity_interval = 60 +- +-# Don't wait at all before redirecting a spawning user to the progress page +-c.JupyterHub.tornado_settings = { +- 'slow_spawn_timeout': 0, +-} +- +- + def camelCaseify(s): + """convert snake_case to camelCase + +@@ -173,6 +155,7 @@ for trait, cfg_key in ( + ("events_enabled", "events"), + ("extra_labels", None), + ("extra_annotations", None), ++ # ("allow_privilege_escalation", None), # Managed manually below + ("uid", None), + ("fs_gid", None), + ("service_account", "serviceAccountName"), +@@ -206,10 +189,19 @@ image = get_config("singleuser.image.name") + if image: + tag = get_config("singleuser.image.tag") + if tag: +- image = "{}:{}".format(image, tag) ++ image = f"{image}:{tag}" + + c.KubeSpawner.image = image + ++# allow_privilege_escalation defaults to False in KubeSpawner 2+. Since its a ++# property where None, False, and True all are valid values that users of the ++# Helm chart may want to set, we can't use the set_config_if_not_none helper ++# function as someone may want to override the default False value to None. ++# ++c.KubeSpawner.allow_privilege_escalation = get_config( ++ "singleuser.allowPrivilegeEscalation" ++) ++ + # Combine imagePullSecret.create (single), imagePullSecrets (list), and + # singleuser.image.pullSecrets (list). + image_pull_secrets = [] +@@ -255,7 +247,7 @@ if match_node_purpose: + pass + else: + raise ValueError( +- "Unrecognized value for matchNodePurpose: %r" % match_node_purpose ++ f"Unrecognized value for matchNodePurpose: {match_node_purpose}" + ) + + # Combine the common tolerations for user pods with singleuser tolerations +@@ -271,7 +263,7 @@ if storage_type == "dynamic": + pvc_name_template = get_config("singleuser.storage.dynamic.pvcNameTemplate") + c.KubeSpawner.pvc_name_template = pvc_name_template + volume_name_template = get_config("singleuser.storage.dynamic.volumeNameTemplate") +- c.KubeSpawner.storage_pvc_ensure = False ++ c.KubeSpawner.storage_pvc_ensure = True + set_config_if_not_none( + c.KubeSpawner, "storage_class", "singleuser.storage.dynamic.storageClass" + ) +@@ -354,41 +346,62 @@ c.KubeSpawner.volume_mounts.extend( + ) + + c.JupyterHub.services = [] ++c.JupyterHub.load_roles = [] + ++# jupyterhub-idle-culler's permissions are scoped to what it needs only, see ++# https://github.com/jupyterhub/jupyterhub-idle-culler#permissions. 
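Two hunks in this stretch of jupyterhub_config.py deal with the same subtlety: for `allow_privilege_escalation`, None, False, and True are all valid settings, so the `set_config_if_not_none` helper (which skips None) cannot be used and the key is read unconditionally; the `singleuser.cmd` hunk a little further down handles the related "explicitly null vs. not set" case with a sentinel object. A minimal sketch of that sentinel idiom, using a hypothetical flat `settings` dict in place of the chart's config lookup:

    # A unique object that no real config value can ever equal.
    _UNSPECIFIED = object()

    def apply_cmd(spawner_cfg: dict, settings: dict) -> None:
        value = settings.get("singleuser.cmd", _UNSPECIFIED)
        if value is _UNSPECIFIED:
            return  # key absent: keep the spawner's own default
        # Key present: honor it, even when it is explicitly None.
        spawner_cfg["cmd"] = value

    cfg = {}
    apply_cmd(cfg, {})                        # absent -> cfg stays {}
    apply_cmd(cfg, {"singleuser.cmd": None})  # null   -> cfg == {"cmd": None}

Comparing with `is _UNSPECIFIED` rather than testing truthiness is what lets an explicit None pass through as a deliberate value.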
++# + if get_config("cull.enabled", False): ++ jupyterhub_idle_culler_role = { ++ "name": "jupyterhub-idle-culler", ++ "scopes": [ ++ "list:users", ++ "read:users:activity", ++ "read:servers", ++ "delete:servers", ++ # "admin:users", # dynamically added if --cull-users is passed ++ ], ++ # assign the role to a jupyterhub service, so it gains these permissions ++ "services": ["jupyterhub-idle-culler"], ++ } ++ + cull_cmd = ["python3", "-m", "jupyterhub_idle_culler"] + base_url = c.JupyterHub.get("base_url", "/") + cull_cmd.append("--url=http://localhost:8081" + url_path_join(base_url, "hub/api")) + + cull_timeout = get_config("cull.timeout") + if cull_timeout: +- cull_cmd.append("--timeout=%s" % cull_timeout) ++ cull_cmd.append(f"--timeout={cull_timeout}") + + cull_every = get_config("cull.every") + if cull_every: +- cull_cmd.append("--cull-every=%s" % cull_every) ++ cull_cmd.append(f"--cull-every={cull_every}") + + cull_concurrency = get_config("cull.concurrency") + if cull_concurrency: +- cull_cmd.append("--concurrency=%s" % cull_concurrency) ++ cull_cmd.append(f"--concurrency={cull_concurrency}") + + if get_config("cull.users"): + cull_cmd.append("--cull-users") ++ jupyterhub_idle_culler_role["scopes"].append("admin:users") ++ ++ if not get_config("cull.adminUsers"): ++ cull_cmd.append("--cull-admin-users=false") + + if get_config("cull.removeNamedServers"): + cull_cmd.append("--remove-named-servers") + + cull_max_age = get_config("cull.maxAge") + if cull_max_age: +- cull_cmd.append("--max-age=%s" % cull_max_age) ++ cull_cmd.append(f"--max-age={cull_max_age}") + + c.JupyterHub.services.append( + { +- "name": "cull-idle", +- "admin": True, ++ "name": "jupyterhub-idle-culler", + "command": cull_cmd, + } + ) ++ c.JupyterHub.load_roles.append(jupyterhub_idle_culler_role) + + for key, service in get_config("hub.services", {}).items(): + # c.JupyterHub.services is a list of dicts, but +@@ -402,26 +415,44 @@ for key, service in get_config("hub.services", {}).items(): + + c.JupyterHub.services.append(service) + ++for key, role in get_config("hub.loadRoles", {}).items(): ++ # c.JupyterHub.load_roles is a list of dicts, but ++ # hub.loadRoles is a dict of dicts to make the config mergable ++ role.setdefault("name", key) ++ ++ c.JupyterHub.load_roles.append(role) ++ ++# respect explicit null command (distinct from unspecified) ++# this avoids relying on KubeSpawner.cmd's default being None ++_unspecified = object() ++specified_cmd = get_config("singleuser.cmd", _unspecified) ++if specified_cmd is not _unspecified: ++ c.Spawner.cmd = specified_cmd + +-set_config_if_not_none(c.Spawner, "cmd", "singleuser.cmd") + set_config_if_not_none(c.Spawner, "default_url", "singleuser.defaultUrl") + +-cloud_metadata = get_config("singleuser.cloudMetadata", {}) ++cloud_metadata = get_config("singleuser.cloudMetadata") + + if cloud_metadata.get("blockWithIptables") == True: + # Use iptables to block access to cloud metadata by default + network_tools_image_name = get_config("singleuser.networkTools.image.name") + network_tools_image_tag = get_config("singleuser.networkTools.image.tag") ++ network_tools_resources = get_config("singleuser.networkTools.resources") ++ ip = cloud_metadata["ip"] + ip_block_container = client.V1Container( + name="block-cloud-metadata", + image=f"{network_tools_image_name}:{network_tools_image_tag}", + command=[ + "iptables", +- "-A", ++ "--append", + "OUTPUT", +- "-d", +- cloud_metadata.get("ip", "169.254.169.254"), +- "-j", ++ "--protocol", ++ "tcp", ++ "--destination", ++ ip, ++ 
"--destination-port", ++ "80", ++ "--jump", + "DROP", + ], + security_context=client.V1SecurityContext( +@@ -429,6 +460,7 @@ if cloud_metadata.get("blockWithIptables") == True: + run_as_user=0, + capabilities=client.V1Capabilities(add=["NET_ADMIN"]), + ), ++ resources=network_tools_resources, + ) + + c.KubeSpawner.init_containers.append(ip_block_container) +@@ -438,17 +470,6 @@ if get_config("debug.enabled", False): + c.JupyterHub.log_level = "DEBUG" + c.Spawner.debug = True + +-# load /usr/local/etc/jupyterhub/jupyterhub_config.d config files +-config_dir = "/usr/local/etc/jupyterhub/jupyterhub_config.d" +-if os.path.isdir(config_dir): +- for file_path in sorted(glob.glob(f"{config_dir}/*.py")): +- file_name = os.path.basename(file_path) +- print(f"Loading {config_dir} config: {file_name}") +- with open(file_path) as f: +- file_content = f.read() +- # compiling makes debugging easier: https://stackoverflow.com/a/437857 +- exec(compile(source=file_content, filename=file_name, mode="exec")) +- + # load potentially seeded secrets + # + # NOTE: ConfigurableHTTPProxy.auth_token is set through an environment variable +@@ -471,11 +492,23 @@ for app, cfg in get_config("hub.config", {}).items(): + cfg.pop("keys", None) + c[app].update(cfg) + ++# load /usr/local/etc/jupyterhub/jupyterhub_config.d config files ++config_dir = "/usr/local/etc/jupyterhub/jupyterhub_config.d" ++if os.path.isdir(config_dir): ++ for file_path in sorted(glob.glob(f"{config_dir}/*.py")): ++ file_name = os.path.basename(file_path) ++ print(f"Loading {config_dir} config: {file_name}") ++ with open(file_path) as f: ++ file_content = f.read() ++ # compiling makes debugging easier: https://stackoverflow.com/a/437857 ++ exec(compile(source=file_content, filename=file_name, mode="exec")) ++ + # execute hub.extraConfig entries + for key, config_py in sorted(get_config("hub.extraConfig", {}).items()): +- print("Loading extra config: %s" % key) ++ print(f"Loading extra config: {key}") + exec(config_py) + ++# CLOUDHARNESS: EDIT START + # Allow switching authenticators easily + auth_type = get_config('hub.config.JupyterHub.authenticator_class') + email_domain = 'local' +@@ -525,4 +558,5 @@ set_config_if_not_none(c.Authenticator, 'whitelist', 'auth.whitelist.users') + c.apps = get_config('apps') + c.registry = get_config('registry') + c.domain = get_config('root.domain') +-c.namespace = get_config('root.namespace') +\ No newline at end of file ++c.namespace = get_config('root.namespace') ++# CLOUDHARNESS: EDIT END +\ No newline at end of file +diff --git a/applications/jupyterhub/deploy/resources/hub/z2jh.py b/applications/jupyterhub/deploy/resources/hub/z2jh.py +index 834a6b6..fc368f6 100755 +--- a/applications/jupyterhub/deploy/resources/hub/z2jh.py ++++ b/applications/jupyterhub/deploy/resources/hub/z2jh.py +@@ -3,15 +3,15 @@ Utility methods for use in jupyterhub_config.py and dynamic subconfigs. 
+ + Methods here can be imported by extraConfig in values.yaml + """ +-from collections import Mapping +-from functools import lru_cache + import os +-import re ++from collections.abc import Mapping ++from functools import lru_cache + + import yaml + ++ + # memoize so we only load config once +-@lru_cache() ++@lru_cache + def _load_config(): + """Load the Helm chart configuration used to render the Helm templates of + the chart from a mounted k8s Secret, and merge in values from an optionally +@@ -27,6 +27,7 @@ def _load_config(): + cfg = _merge_dictionaries(cfg, values) + else: + print(f"No config at {path}") ++ # EDIT: CLOUDHARNESS START + path = f"/opt/cloudharness/resources/allvalues.yaml" + if os.path.exists(path): + print("Loading global CloudHarness config at", path) +@@ -34,11 +35,11 @@ def _load_config(): + values = yaml.safe_load(f) + cfg = _merge_dictionaries(cfg, values) + cfg['root'] = values +- ++ # EDIT: CLOUDHARNESS END + return cfg + + +-@lru_cache() ++@lru_cache + def _get_config_value(key): + """Load value from the k8s ConfigMap given a key.""" + +@@ -50,7 +51,7 @@ def _get_config_value(key): + raise Exception(f"{path} not found!") + + +-@lru_cache() ++@lru_cache + def get_secret_value(key, default="never-explicitly-set"): + """Load value from the user managed k8s Secret or the default k8s Secret + given a key.""" +@@ -117,7 +118,7 @@ def get_config(key, default=None): + else: + value = value[level] + +- ++ # EDIT: CLOUDHARNESS START + if value and isinstance(value, str): + replace_var = re.search("{{.*?}}", value) + if replace_var: +@@ -128,6 +129,7 @@ def get_config(key, default=None): + if repl: + print("replace", variable, "in", value, ":", repl) + value = re.sub("{{.*?}}", repl, value) ++ # EDIT: CLOUDHARNESS END + return value + + +@@ -137,6 +139,5 @@ def set_config_if_not_none(cparent, name, key): + configuration item if not None + """ + data = get_config(key) +- + if data is not None: +- setattr(cparent, name, data) +\ No newline at end of file ++ setattr(cparent, name, data) +diff --git a/applications/jupyterhub/deploy/templates/NOTES.txt b/applications/jupyterhub/deploy/templates/NOTES.txt +new file mode 100644 +index 0000000..9769a9c +--- /dev/null ++++ b/applications/jupyterhub/deploy/templates/NOTES.txt +@@ -0,0 +1,158 @@ ++{{- $proxy_service := include "jupyterhub.proxy-public.fullname" . -}} ++ ++{{- /* Generated with https://patorjk.com/software/taag/#p=display&h=0&f=Slant&t=JupyterHub */}} ++. __ __ __ __ __ ++ / / __ __ ____ __ __ / /_ ___ _____ / / / / __ __ / /_ ++ __ / / / / / / / __ \ / / / / / __/ / _ \ / ___/ / /_/ / / / / / / __ \ ++/ /_/ / / /_/ / / /_/ / / /_/ / / /_ / __/ / / / __ / / /_/ / / /_/ / ++\____/ \__,_/ / .___/ \__, / \__/ \___/ /_/ /_/ /_/ \__,_/ /_.___/ ++ /_/ /____/ ++ ++ You have successfully installed the official JupyterHub Helm chart! 
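A note on the z2jh.py changes shown above before continuing with the chart templates: the CLOUDHARNESS edit merges the global /opt/cloudharness/resources/allvalues.yaml into the chart config, and get_config() then expands any {{...}} placeholder found in a string value by looking the referenced key up in the merged config. A rough, self-contained sketch of that substitution step (the dict and its keys are illustrative, not the real module):

    import re

    config = {
        "registry": {"name": "reg.example.dev"},        # illustrative values
        "image": "{{registry.name}}/jupyterhub:latest",
    }

    def lookup(cfg, dotted_key):
        # walk "a.b.c" through nested dicts
        value = cfg
        for part in dotted_key.split("."):
            value = value[part]
        return value

    def resolve(cfg, value):
        # expand a {{dotted.key}} placeholder against the merged config
        match = re.search(r"{{(.*?)}}", value)
        if match:
            repl = lookup(cfg, match.group(1).strip())
            if repl:
                value = re.sub(r"{{.*?}}", repl, value)
        return value

    print(resolve(config, config["image"]))  # reg.example.dev/jupyterhub:latest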
++ ++### Installation info ++ ++ - Kubernetes namespace: {{ .Release.Namespace }} ++ - Helm release name: {{ .Release.Name }} ++ - Helm chart version: {{ .Chart.Version }} ++ - JupyterHub version: {{ .Chart.AppVersion }} ++ - Hub pod packages: See https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/{{ include "jupyterhub.chart-version-to-git-ref" .Chart.Version }}/images/hub/requirements.txt ++ ++### Followup links ++ ++ - Documentation: https://z2jh.jupyter.org ++ - Help forum: https://discourse.jupyter.org ++ - Social chat: https://gitter.im/jupyterhub/jupyterhub ++ - Issue tracking: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues ++ ++### Post-installation checklist ++ ++ - Verify that created Pods enter a Running state: ++ ++ kubectl --namespace={{ .Release.Namespace }} get pod ++ ++ If a pod is stuck with a Pending or ContainerCreating status, diagnose with: ++ ++ kubectl --namespace={{ .Release.Namespace }} describe pod <name of pod> ++ ++ If a pod keeps restarting, diagnose with: ++ ++ kubectl --namespace={{ .Release.Namespace }} logs --previous <name of pod> ++ {{- println }} ++ ++ {{- if eq .Values.apps.jupyterhub.proxy.service.type "LoadBalancer" }} ++ - Verify an external IP is provided for the k8s Service {{ $proxy_service }}. ++ ++ kubectl --namespace={{ .Release.Namespace }} get service {{ $proxy_service }} ++ ++ If the external ip remains <pending>, diagnose with: ++ ++ kubectl --namespace={{ .Release.Namespace }} describe service {{ $proxy_service }} ++ {{- end }} ++ ++ - Verify web based access: ++ {{- println }} ++ {{- if .Values.apps.jupyterhub.ingress.enabled }} ++ {{- range $host := .Values.apps.jupyterhub.ingress.hosts }} ++ Try insecure HTTP access: http://{{ $host }}{{ $.Values.apps.jupyterhub.hub.baseUrl | trimSuffix "/" }}/ ++ {{- end }} ++ ++ {{- range $tls := .Values.apps.jupyterhub.ingress.tls }} ++ {{- range $host := $tls.hosts }} ++ Try secure HTTPS access: https://{{ $host }}{{ $.Values.apps.jupyterhub.hub.baseUrl | trimSuffix "/" }}/ ++ {{- end }} ++ {{- end }} ++ {{- else }} ++ You have not configured a k8s Ingress resource so you need to access the k8s ++ Service {{ $proxy_service }} directly. ++ {{- println }} ++ ++ {{- if eq .Values.apps.jupyterhub.proxy.service.type "NodePort" }} ++ The k8s Service {{ $proxy_service }} is exposed via NodePorts. That means ++ that all the k8s cluster's nodes are exposing the k8s Service via those ++ ports. ++ ++ Try insecure HTTP access: http://<any-nodes-ip>:{{ .Values.apps.jupyterhub.proxy.service.nodePorts.http | default "no-http-nodeport-set"}} ++ Try secure HTTPS access: https://<any-nodes-ip>:{{ .Values.apps.jupyterhub.proxy.service.nodePorts.https | default "no-https-nodeport-set" }} ++ ++ {{- else }} ++ If your computer is outside the k8s cluster, you can port-forward traffic to ++ the k8s Service {{ $proxy_service }} with kubectl to access it from your ++ computer. 
++ ++ kubectl --namespace={{ .Release.Namespace }} port-forward service/{{ $proxy_service }} 8080:http ++ ++ Try insecure HTTP access: http://localhost:8080 ++ {{- end }} ++ {{- end }} ++ {{- println }} ++ ++ ++ ++ ++ ++{{- /* ++ Warnings for likely misconfigurations ++*/}} ++ ++{{- if and (not .Values.apps.jupyterhub.scheduling.podPriority.enabled) (and .Values.apps.jupyterhub.scheduling.userPlaceholder.enabled .Values.apps.jupyterhub.scheduling.userPlaceholder.replicas) }} ++################################################################################# ++###### WARNING: You are using user placeholders without pod priority ##### ++###### enabled*, either enable pod priority or stop using the ##### ++###### user placeholders** to avoid having placeholders that ##### ++###### refuse to make room for a real user. ##### ++###### ##### ++###### *scheduling.podPriority.enabled ##### ++###### **scheduling.userPlaceholder.enabled ##### ++###### **scheduling.userPlaceholder.replicas ##### ++################################################################################# ++{{- println }} ++{{- end }} ++ ++ ++ ++ ++ ++{{- /* ++ Breaking changes and failures for likely misconfigurations. ++*/}} ++ ++{{- $breaking := "" }} ++{{- $breaking_title := "\n" }} ++{{- $breaking_title = print $breaking_title "\n#################################################################################" }} ++{{- $breaking_title = print $breaking_title "\n###### BREAKING: The config values passed contained no longer accepted #####" }} ++{{- $breaking_title = print $breaking_title "\n###### options. See the messages below for more details. #####" }} ++{{- $breaking_title = print $breaking_title "\n###### #####" }} ++{{- $breaking_title = print $breaking_title "\n###### To verify your updated config is accepted, you can use #####" }} ++{{- $breaking_title = print $breaking_title "\n###### the `helm template` command. #####" }} ++{{- $breaking_title = print $breaking_title "\n#################################################################################" }} ++ ++ ++{{- /* ++ This is an example (in a helm template comment) on how to detect and ++ communicate with regards to a breaking chart config change. ++ ++ {{- if hasKey .Values.apps.jupyterhub.singleuser.cloudMetadata "enabled" }} ++ {{- $breaking = print $breaking "\n\nCHANGED: singleuser.cloudMetadata.enabled must as of 1.0.0 be configured using singleuser.cloudMetadata.blockWithIptables with the opposite value." }} ++ {{- end }} ++*/}} ++ ++ ++{{- if hasKey .Values.apps.jupyterhub.rbac "enabled" }} ++{{- $breaking = print $breaking "\n\nCHANGED: rbac.enabled must as of version 2.0.0 be configured via rbac.create and .serviceAccount.create." }} ++{{- end }} ++ ++ ++{{- if hasKey .Values.apps.jupyterhub.hub "fsGid" }} ++{{- $breaking = print $breaking "\n\nCHANGED: hub.fsGid must as of version 2.0.0 be configured via hub.podSecurityContext.fsGroup." }} ++{{- end }} ++ ++ ++{{- if and .Values.apps.jupyterhub.singleuser.cloudMetadata.blockWithIptables (and .Values.apps.jupyterhub.singleuser.networkPolicy.enabled .Values.apps.jupyterhub.singleuser.networkPolicy.egressAllowRules.cloudMetadataServer) }} ++{{- $breaking = print $breaking "\n\nCHANGED: singleuser.cloudMetadata.blockWithIptables must as of version 3.0.0 not be configured together with singleuser.networkPolicy.egressAllowRules.cloudMetadataServer as it leads to an ambiguous configuration." 
}} ++{{- end }} ++ ++ ++{{- if $breaking }} ++{{- fail (print $breaking_title $breaking "\n\n") }} ++{{- end }} +diff --git a/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl b/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl +index b742a12..3159d10 100644 +--- a/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl ++++ b/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl +@@ -168,30 +168,30 @@ ldap.dn.user.useLookupName: LDAPAuthenticator.use_lookup_dn_username + {{- $c := dict }} + {{- $result := (dict "hub" (dict "config" $c)) }} + {{- /* +- Flattens the config in .Values.apps.jupyterhub.auth to a format of ++ Flattens the config in .Values.apps.jupyterhub.apps.jupyterhub.auth to a format of + "keyX.keyY...": "value". Writes output to $c. + */}} +- {{- include "jupyterhub.flattenDict" (list $c (omit .Values.apps.jupyterhub.auth "type" "custom")) }} ++ {{- include "jupyterhub.flattenDict" (list $c (omit .Values.apps.jupyterhub.apps.jupyterhub.auth "type" "custom")) }} + + {{- /* + Transform the flattened config using a dictionary + representing the old z2jh config, output the result + in $c. + */}} +- {{- include "jupyterhub.authDep.remapOldToNew.mappable" (list $c .Values.apps.jupyterhub.global.safeToShowValues) }} ++ {{- include "jupyterhub.authDep.remapOldToNew.mappable" (list $c .Values.apps.jupyterhub.apps.jupyterhub.global.safeToSho.Values.apps.jupyterhub. }} + +- {{- $class_old_config_key := .Values.apps.jupyterhub.auth.type | default "" }} {{- /* ldap - github */}} ++ {{- $class_old_config_key := .Values.apps.jupyterhub.apps.jupyterhub.auth.type | default "" }} {{- /* ldap - github */}} + {{- $class_new_entrypoint := "" }} {{- /* ldapauthenticator.LDAPAuthenticator - github */}} + {{- $class_new_config_key := "" }} {{- /* LDAPAuthenticator - GitHubOAuthenticator */}} + + {{- /* SET $class_new_entrypoint, $class_new_config_key */}} + {{- if eq $class_old_config_key "custom" }} +- {{- $class_new_entrypoint = .Values.apps.jupyterhub.auth.custom.className | default "custom.className wasn't configured!" }} ++ {{- $class_new_entrypoint = .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.className | default "custom.className wasn't configured!" }} + {{- $class_new_config_key = $class_new_entrypoint | splitList "." | last }} + {{- /* UPDATE c dict explicitly with auth.custom.config */}} +- {{- if .Values.apps.jupyterhub.auth.custom.config }} +- {{- $custom_config := merge (dict) .Values.apps.jupyterhub.auth.custom.config }} +- {{- if not .Values.apps.jupyterhub.global.safeToShowValues }} ++ {{- if .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.config }} ++ {{- $custom_config := merge (dict) .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.config }} ++ {{- if not .Values.apps.jupyterhub.apps.jupyterhub.global.safeToSho.Values.apps.jupyterhub.}} + {{- range $key, $val := $custom_config }} + {{- $_ := set $custom_config $key "***" }} + {{- end }} +@@ -213,7 +213,7 @@ The JupyterHub Helm chart's auth config has been reworked and requires changes. + + The new way to configure authentication in chart version 0.11.0+ is printed + below for your convenience. The values are not shown by default to ensure no +-secrets are exposed, run helm upgrade with --set global.safeToShowValues=true ++secrets are exposed, run helm upgrade with --set global.safeToSho.Values.apps.jupyterhub.true + to show them. 
+ + {{ $result | toYaml }} +diff --git a/applications/jupyterhub/deploy/templates/_helpers-names.tpl b/applications/jupyterhub/deploy/templates/_helpers-names.tpl +index e9cf7bb..401d601 100644 +--- a/applications/jupyterhub/deploy/templates/_helpers-names.tpl ++++ b/applications/jupyterhub/deploy/templates/_helpers-names.tpl +@@ -3,8 +3,8 @@ + parent charts to reference these dynamic resource names. + + To avoid duplicating documentation, for more information, please see the the +- fullnameOverride entry in schema.yaml or the configuration reference that +- schema.yaml renders to. ++ fullnameOverride entry in values.schema.yaml or the configuration reference ++ that values.schema.yaml renders to. + + https://z2jh.jupyter.org/en/latest/resources/reference.html#fullnameOverride + */}} +@@ -38,8 +38,8 @@ + {{- $name_override := .Values.apps.jupyterhub.nameOverride }} + {{- if ne .Chart.Name "jupyterhub" }} + {{- if .Values.apps.jupyterhub.jupyterhub }} +- {{- $fullname_override = .Values.apps.jupyterhub.fullnameOverride }} +- {{- $name_override = .Values.apps.jupyterhub.nameOverride }} ++ {{- $fullname_override = .Values.apps.jupyterhub.jupyterhub.fullnameOverride }} ++ {{- $name_override = .Values.apps.jupyterhub.jupyterhub.nameOverride }} + {{- end }} + {{- end }} + +@@ -76,12 +76,23 @@ + {{- include "jupyterhub.fullname.dash" . }}hub + {{- end }} + ++{{- /* hub-serviceaccount ServiceAccount */}} ++{{- define "jupyterhub.hub-serviceaccount.fullname" -}} ++ {{- if .Values.apps.jupyterhub.hub.serviceAccount.create }} ++ {{- .Values.apps.jupyterhub.hub.serviceAccount.name | default (include "jupyterhub.hub.fullname" .) }} ++ {{- else }} ++ {{- .Values.apps.jupyterhub.hub.serviceAccount.name | default "default" }} ++ {{- end }} ++{{- end }} ++ + {{- /* hub-existing-secret Secret */}} + {{- define "jupyterhub.hub-existing-secret.fullname" -}} + {{- /* A hack to avoid issues from invoking this from a parent Helm chart. */}} + {{- $existing_secret := .Values.apps.jupyterhub.hub.existingSecret }} + {{- if ne .Chart.Name "jupyterhub" }} +- {{- $existing_secret = .Values.apps.jupyterhub.hub.existingSecret }} ++ {{- if .Values.apps.jupyterhub.jupyterhub }} ++ {{- $existing_secret = .Values.apps.jupyterhub.jupyterhub.hub.existingSecret }} ++ {{- end }} + {{- end }} + {{- if $existing_secret }} + {{- $existing_secret }} +@@ -133,11 +144,29 @@ + {{- include "jupyterhub.fullname.dash" . }}autohttps + {{- end }} + ++{{- /* autohttps-serviceaccount ServiceAccount */}} ++{{- define "jupyterhub.autohttps-serviceaccount.fullname" -}} ++ {{- if .Values.apps.jupyterhub.proxy.traefik.serviceAccount.create }} ++ {{- .Values.apps.jupyterhub.proxy.traefik.serviceAccount.name | default (include "jupyterhub.autohttps.fullname" .) }} ++ {{- else }} ++ {{- .Values.apps.jupyterhub.proxy.traefik.serviceAccount.name | default "default" }} ++ {{- end }} ++{{- end }} ++ + {{- /* user-scheduler Deployment */}} + {{- define "jupyterhub.user-scheduler-deploy.fullname" -}} + {{- include "jupyterhub.fullname.dash" . }}user-scheduler + {{- end }} + ++{{- /* user-scheduler-serviceaccount ServiceAccount */}} ++{{- define "jupyterhub.user-scheduler-serviceaccount.fullname" -}} ++ {{- if .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.create }} ++ {{- .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.name | default (include "jupyterhub.user-scheduler-deploy.fullname" .) 
}} ++ {{- else }} ++ {{- .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.name | default "default" }} ++ {{- end }} ++{{- end }} ++ + {{- /* user-scheduler leader election lock resource */}} + {{- define "jupyterhub.user-scheduler-lock.fullname" -}} + {{- include "jupyterhub.user-scheduler-deploy.fullname" . }}-lock +@@ -153,6 +182,15 @@ + {{- include "jupyterhub.fullname.dash" . }}hook-image-awaiter + {{- end }} + ++{{- /* image-awaiter-serviceaccount ServiceAccount */}} ++{{- define "jupyterhub.hook-image-awaiter-serviceaccount.fullname" -}} ++ {{- if .Values.apps.jupyterhub.prePuller.hook.serviceAccount.create }} ++ {{- .Values.apps.jupyterhub.prePuller.hook.serviceAccount.name | default (include "jupyterhub.hook-image-awaiter.fullname" .) }} ++ {{- else }} ++ {{- .Values.apps.jupyterhub.prePuller.hook.serviceAccount.name | default "default" }} ++ {{- end }} ++{{- end }} ++ + {{- /* hook-image-puller DaemonSet */}} + {{- define "jupyterhub.hook-image-puller.fullname" -}} + {{- include "jupyterhub.fullname.dash" . }}hook-image-puller +@@ -210,6 +248,15 @@ + {{- end }} + {{- end }} + ++{{- /* image-puller Priority */}} ++{{- define "jupyterhub.image-puller-priority.fullname" -}} ++ {{- if (include "jupyterhub.fullname" .) }} ++ {{- include "jupyterhub.fullname.dash" . }}image-puller ++ {{- else }} ++ {{- .Release.Name }}-image-puller-priority ++ {{- end }} ++{{- end }} ++ + {{- /* user-scheduler's registered name */}} + {{- define "jupyterhub.user-scheduler.fullname" -}} + {{- if (include "jupyterhub.fullname" .) }} +@@ -231,6 +278,7 @@ + fullname: {{ include "jupyterhub.fullname" . | quote }} + fullname-dash: {{ include "jupyterhub.fullname.dash" . | quote }} + hub: {{ include "jupyterhub.hub.fullname" . | quote }} ++hub-serviceaccount: {{ include "jupyterhub.hub-serviceaccount.fullname" . | quote }} + hub-existing-secret: {{ include "jupyterhub.hub-existing-secret.fullname" . | quote }} + hub-existing-secret-or-default: {{ include "jupyterhub.hub-existing-secret-or-default.fullname" . | quote }} + hub-pvc: {{ include "jupyterhub.hub-pvc.fullname" . | quote }} +@@ -241,10 +289,14 @@ proxy-public: {{ include "jupyterhub.proxy-public.fullname" . | quote }} + proxy-public-tls: {{ include "jupyterhub.proxy-public-tls.fullname" . | quote }} + proxy-public-manual-tls: {{ include "jupyterhub.proxy-public-manual-tls.fullname" . | quote }} + autohttps: {{ include "jupyterhub.autohttps.fullname" . | quote }} ++autohttps-serviceaccount: {{ include "jupyterhub.autohttps-serviceaccount.fullname" . | quote }} + user-scheduler-deploy: {{ include "jupyterhub.user-scheduler-deploy.fullname" . | quote }} ++user-scheduler-serviceaccount: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . | quote }} + user-scheduler-lock: {{ include "jupyterhub.user-scheduler-lock.fullname" . | quote }} + user-placeholder: {{ include "jupyterhub.user-placeholder.fullname" . | quote }} ++image-puller-priority: {{ include "jupyterhub.image-puller-priority.fullname" . | quote }} + hook-image-awaiter: {{ include "jupyterhub.hook-image-awaiter.fullname" . | quote }} ++hook-image-awaiter-serviceaccount: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . | quote }} + hook-image-puller: {{ include "jupyterhub.hook-image-puller.fullname" . | quote }} + continuous-image-puller: {{ include "jupyterhub.continuous-image-puller.fullname" . | quote }} + singleuser: {{ include "jupyterhub.singleuser.fullname" . 
| quote }} +diff --git a/applications/jupyterhub/deploy/templates/_helpers-netpol.tpl b/applications/jupyterhub/deploy/templates/_helpers-netpol.tpl +new file mode 100644 +index 0000000..4075569 +--- /dev/null ++++ b/applications/jupyterhub/deploy/templates/_helpers-netpol.tpl +@@ -0,0 +1,101 @@ ++{{- /* ++ This named template renders egress rules for NetworkPolicy resources based on ++ common configuration. ++ ++ It is rendering based on the `egressAllowRules` and `egress` keys of the ++ passed networkPolicy config object. Each flag set to true under ++ `egressAllowRules` is rendered to a egress rule that next to any custom user ++ defined rules from the `egress` config. ++ ++ This named template needs to render based on a specific networkPolicy ++ resource, but also needs access to the root context. Due to that, it ++ accepts a list as its scope, where the first element is supposed to be the ++ root context and the second element is supposed to be the networkPolicy ++ configuration object. ++ ++ As an example, this is how you would render this named template from a ++ NetworkPolicy resource under its egress: ++ ++ egress: ++ # other rules here... ++ ++ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.hub.networkPolicy)) }} ++ {{- . | nindent 4 }} ++ {{- end }} ++ ++ Note that the reference to privateIPs and nonPrivateIPs relate to ++ https://en.wikipedia.org/wiki/Private_network#Private_IPv4_addresses. ++*/}} ++ ++{{- define "jupyterhub.networkPolicy.renderEgressRules" -}} ++{{- $root := index . 0 }} ++{{- $netpol := index . 1 }} ++{{- if or (or $netpol.egressAllowRules.dnsPortsCloudMetadataServer $netpol.egressAllowRules.dnsPortsKubeSystemNamespace) $netpol.egressAllowRules.dnsPortsPrivateIPs }} ++- ports: ++ - port: 53 ++ protocol: UDP ++ - port: 53 ++ protocol: TCP ++ to: ++ {{- if $netpol.egressAllowRules.dnsPortsCloudMetadataServer }} ++ # Allow outbound connections to DNS ports on the cloud metadata server ++ - ipBlock: ++ cidr: {{ $root.Values.apps.jupyterhub.singleuser.cloudMetadata.ip }}/32 ++ {{- end }} ++ {{- if $netpol.egressAllowRules.dnsPortsKubeSystemNamespace }} ++ # Allow outbound connections to DNS ports on pods in the kube-system ++ # namespace ++ - namespaceSelector: ++ matchLabels: ++ kubernetes.io/metadata.name: kube-system ++ {{- end }} ++ {{- if $netpol.egressAllowRules.dnsPortsPrivateIPs }} ++ # Allow outbound connections to DNS ports on destinations in the private IP ++ # ranges ++ - ipBlock: ++ cidr: 10.0.0.0/8 ++ - ipBlock: ++ cidr: 172.16.0.0/12 ++ - ipBlock: ++ cidr: 192.168.0.0/16 ++ {{- end }} ++{{- end }} ++ ++{{- if $netpol.egressAllowRules.nonPrivateIPs }} ++# Allow outbound connections to non-private IP ranges ++- to: ++ - ipBlock: ++ cidr: 0.0.0.0/0 ++ except: ++ # As part of this rule: ++ # - don't allow outbound connections to private IPs ++ - 10.0.0.0/8 ++ - 172.16.0.0/12 ++ - 192.168.0.0/16 ++ # - don't allow outbound connections to the cloud metadata server ++ - {{ $root.Values.apps.jupyterhub.singleuser.cloudMetadata.ip }}/32 ++{{- end }} ++ ++{{- if $netpol.egressAllowRules.privateIPs }} ++# Allow outbound connections to private IP ranges ++- to: ++ - ipBlock: ++ cidr: 10.0.0.0/8 ++ - ipBlock: ++ cidr: 172.16.0.0/12 ++ - ipBlock: ++ cidr: 192.168.0.0/16 ++{{- end }} ++ ++{{- if $netpol.egressAllowRules.cloudMetadataServer }} ++# Allow outbound connections to the cloud metadata server ++- to: ++ - ipBlock: ++ cidr: {{ $root.Values.apps.jupyterhub.singleuser.cloudMetadata.ip }}/32 ++{{- end }} ++ 
++{{- with $netpol.egress }} ++# Allow outbound connections based on user specified rules ++{{ . | toYaml }} ++{{- end }} ++{{- end }} +diff --git a/applications/jupyterhub/deploy/templates/_helpers.tpl b/applications/jupyterhub/deploy/templates/_helpers.tpl +index efea86d..a202363 100755 +--- a/applications/jupyterhub/deploy/templates/_helpers.tpl ++++ b/applications/jupyterhub/deploy/templates/_helpers.tpl +@@ -12,7 +12,7 @@ + + When you ask a helper to render its content, one often forward the current + scope to the helper in order to allow it to access .Release.Name, +- .Values.apps.jupyterhub.rbac.enabled and similar values. ++ .Values.apps.jupyterhub.rbac.create and similar values. + + #### Example - Passing the current scope + {{ include "jupyterhub.commonLabels" . }} +@@ -180,8 +180,51 @@ component: {{ include "jupyterhub.componentLabel" . }} + Augments passed .pullSecrets with $.Values.apps.jupyterhub.imagePullSecrets + */}} + {{- define "jupyterhub.imagePullSecrets" -}} ++ {{- /* ++ We have implemented a trick to allow a parent chart depending on this ++ chart to call this named templates. ++ ++ Caveats and notes: ++ ++ 1. While parent charts can reference these, grandparent charts can't. ++ 2. Parent charts must not use an alias for this chart. ++ 3. There is no failsafe workaround to above due to ++ https://github.com/helm/helm/issues/9214. ++ 4. .Chart is of its own type (*chart.Metadata) and needs to be casted ++ using "toYaml | fromYaml" in order to be able to use normal helm ++ template functions on it. ++ */}} ++ {{- $jupyterhub_values := .root.Values.apps.jupyterhub.}} ++ {{- if ne .root.Chart.Name "jupyterhub" }} ++ {{- if .root.Values.apps.jupyterhub.jupyterhub }} ++ {{- $jupyterhub_values = .root.Values.apps.jupyterhub.jupyterhub }} ++ {{- end }} ++ {{- end }} + ++ {{- /* Populate $_.list with all relevant entries */}} ++ {{- $_ := dict "list" (concat .image.pullSecrets $jupyterhub_values.imagePullSecrets | uniq) }} ++ {{- if and $jupyterhub_values.imagePullSecret.create $jupyterhub_values.imagePullSecret.automaticReferenceInjection }} ++ {{- $__ := set $_ "list" (append $_.list (include "jupyterhub.image-pull-secret.fullname" .root) | uniq) }} ++ {{- end }} + ++ {{- /* Decide if something should be written */}} ++ {{- if not (eq ($_.list | toJson) "[]") }} ++ ++ {{- /* Process the $_.list where strings become dicts with a name key and the ++ strings become the name keys' values into $_.res */}} ++ {{- $_ := set $_ "res" list }} ++ {{- range $_.list }} ++ {{- if eq (typeOf .) "string" }} ++ {{- $__ := set $_ "res" (append $_.res (dict "name" .)) }} ++ {{- else }} ++ {{- $__ := set $_ "res" (append $_.res .) }} ++ {{- end }} ++ {{- end }} ++ ++ {{- /* Write the results */}} ++ {{- $_.res | toJson }} ++ ++ {{- end }} + {{- end }} + + {{- /* +@@ -339,3 +382,21 @@ limits: + {{- print "\n\nextraFiles entries (" $file_key ") must only contain one of the fields: 'data', 'stringData', and 'binaryData'." | fail }} + {{- end }} + {{- end }} ++ ++{{- /* ++ jupyterhub.chart-version-to-git-ref: ++ Renders a valid git reference from a chartpress generated version string. ++ In practice, either a git tag or a git commit hash will be returned. ++ ++ - The version string will follow a chartpress pattern, see ++ https://github.com/jupyterhub/chartpress#examples-chart-versions-and-image-tags. ++ ++ - The regexReplaceAll function is a sprig library function, see ++ https://masterminds.github.io/sprig/strings.html. 
++ ++ - The regular expression is in golang syntax, but \d had to become \\d for ++ example. ++*/}} ++{{- define "jupyterhub.chart-version-to-git-ref" -}} ++{{- regexReplaceAll ".*[.-]n\\d+[.]h(.*)" . "${1}" }} ++{{- end }} +diff --git a/applications/jupyterhub/deploy/templates/hub/configmap.yaml b/applications/jupyterhub/deploy/templates/hub/configmap.yaml +index c913f67..f52feb6 100755 +--- a/applications/jupyterhub/deploy/templates/hub/configmap.yaml ++++ b/applications/jupyterhub/deploy/templates/hub/configmap.yaml +@@ -29,5 +29,6 @@ data: + */}} + checksum_hook-image-puller: {{ include "jupyterhub.imagePuller.daemonset.hook.checksum" . | quote }} + ++ # EDIT: CLOUDHARNESS + allvalues.yaml: | + {{- .Values | toYaml | nindent 4 }} +\ No newline at end of file +diff --git a/applications/jupyterhub/deploy/templates/hub/deployment.yaml b/applications/jupyterhub/deploy/templates/hub/deployment.yaml +index 82132c6..d105ecc 100755 +--- a/applications/jupyterhub/deploy/templates/hub/deployment.yaml ++++ b/applications/jupyterhub/deploy/templates/hub/deployment.yaml +@@ -5,6 +5,9 @@ metadata: + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} + spec: ++ {{- if typeIs "int" .Values.apps.jupyterhub.hub.revisionHistoryLimit }} ++ revisionHistoryLimit: {{ .Values.apps.jupyterhub.hub.revisionHistoryLimit }} ++ {{- end }} + replicas: 1 + selector: + matchLabels: +@@ -30,11 +33,14 @@ spec: + {{- . | toYaml | nindent 8 }} + {{- end }} + spec: +-{{ include "deploy_utils.etcHosts" . | indent 6 }} ++{{ include "deploy_utils.etcHosts" . | indent 6 }} # EDIT: CLOUDHARNESS + {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }} + priorityClassName: {{ include "jupyterhub.priority.fullname" . }} + {{- end }} +- nodeSelector: {{ toJson .Values.apps.jupyterhub.hub.nodeSelector }} ++ {{- with .Values.apps.jupyterhub.hub.nodeSelector }} ++ nodeSelector: ++ {{- . | toYaml | nindent 8 }} ++ {{- end }} + {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.hub.tolerations }} + tolerations: + {{- . | toYaml | nindent 8 }} +@@ -44,7 +50,7 @@ spec: + - name: config + configMap: + name: {{ include "jupyterhub.hub.fullname" . }} +- {{- /* This is needed by cloudharness libraries */}} ++ {{- /* EDIT: CLOUDHARNESS This is needed by cloudharness libraries */}} + - name: cloudharness-allvalues + configMap: + name: cloudharness-allvalues +@@ -82,11 +88,13 @@ spec: + persistentVolumeClaim: + claimName: {{ include "jupyterhub.hub-pvc.fullname" . }} + {{- end }} +- {{- if .Values.apps.jupyterhub.rbac.enabled }} +- serviceAccountName: {{ include "jupyterhub.hub.fullname" . }} ++ {{- with include "jupyterhub.hub-serviceaccount.fullname" . }} ++ serviceAccountName: {{ . }} + {{- end }} ++ {{- with .Values.apps.jupyterhub.hub.podSecurityContext }} + securityContext: +- fsGroup: {{ .Values.apps.jupyterhub.hub.fsGid }} ++ {{- . | toYaml | nindent 8 }} ++ {{- end }} + {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.apps.jupyterhub.hub.image) }} + imagePullSecrets: {{ . 
}} + {{- end }} +@@ -153,14 +161,14 @@ spec: + name: config + - mountPath: /usr/local/etc/jupyterhub/secret/ + name: secret +- - name: cloudharness-allvalues ++ - name: cloudharness-allvalues # EDIT: CLOUDHARNESS START + mountPath: /opt/cloudharness/resources/allvalues.yaml + subPath: allvalues.yaml + {{- if .Values.apps.accounts }} + - name: cloudharness-kc-accounts + mountPath: /opt/cloudharness/resources/auth + readOnly: true +- {{- end }} ++ {{- end }} # EDIT: CLOUDHARNESS END + {{- if (include "jupyterhub.hub-existing-secret.fullname" .) }} + - mountPath: /usr/local/etc/jupyterhub/existing-secret/ + name: existing-secret +diff --git a/applications/jupyterhub/deploy/templates/hub/netpol.yaml b/applications/jupyterhub/deploy/templates/hub/netpol.yaml +index 9a7a6bc..d9508e2 100755 +--- a/applications/jupyterhub/deploy/templates/hub/netpol.yaml ++++ b/applications/jupyterhub/deploy/templates/hub/netpol.yaml +@@ -61,31 +61,24 @@ spec: + + egress: + # hub --> proxy +- - ports: +- - port: 8001 +- to: ++ - to: + - podSelector: + matchLabels: + {{- $_ := merge (dict "componentLabel" "proxy") . }} + {{- include "jupyterhub.matchLabels" $_ | nindent 14 }} ++ ports: ++ - port: 8001 ++ + # hub --> singleuser-server +- - ports: +- - port: 8888 +- to: ++ - to: + - podSelector: + matchLabels: + {{- $_ := merge (dict "componentLabel" "singleuser-server") . }} + {{- include "jupyterhub.matchLabels" $_ | nindent 14 }} ++ ports: ++ - port: 8888 + +- # hub --> Kubernetes internal DNS +- - ports: +- - protocol: UDP +- port: 53 +- - protocol: TCP +- port: 53 +- +- {{- with .Values.apps.jupyterhub.hub.networkPolicy.egress }} +- # hub --> depends, but the default is everything +- {{- . | toYaml | nindent 4 }} ++ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.hub.networkPolicy)) }} ++ {{- . | nindent 4 }} + {{- end }} + {{- end }} +diff --git a/applications/jupyterhub/deploy/templates/hub/pdb.yaml b/applications/jupyterhub/deploy/templates/hub/pdb.yaml +index 855609d..bb6c7b1 100755 +--- a/applications/jupyterhub/deploy/templates/hub/pdb.yaml ++++ b/applications/jupyterhub/deploy/templates/hub/pdb.yaml +@@ -1,9 +1,5 @@ + {{- if .Values.apps.jupyterhub.hub.pdb.enabled -}} +-{{- if .Capabilities.APIVersions.Has "policy/v1" }} + apiVersion: policy/v1 +-{{- else }} +-apiVersion: policy/v1beta1 +-{{- end }} + kind: PodDisruptionBudget + metadata: + name: {{ include "jupyterhub.hub.fullname" . }} +diff --git a/applications/jupyterhub/deploy/templates/hub/rbac.yaml b/applications/jupyterhub/deploy/templates/hub/rbac.yaml +index 738daab..1b689af 100755 +--- a/applications/jupyterhub/deploy/templates/hub/rbac.yaml ++++ b/applications/jupyterhub/deploy/templates/hub/rbac.yaml +@@ -1,15 +1,4 @@ +-{{- if .Values.apps.jupyterhub.rbac.enabled -}} +-apiVersion: v1 +-kind: ServiceAccount +-metadata: +- name: {{ include "jupyterhub.hub.fullname" . }} +- {{- with .Values.apps.jupyterhub.hub.serviceAccount.annotations }} +- annotations: +- {{- . | toYaml | nindent 4 }} +- {{- end }} +- labels: +- {{- include "jupyterhub.labels" . | nindent 4 }} +---- ++{{- if .Values.apps.jupyterhub.rbac.create -}} + kind: Role + apiVersion: rbac.authorization.k8s.io/v1 + metadata: +@@ -32,7 +21,7 @@ metadata: + {{- include "jupyterhub.labels" . | nindent 4 }} + subjects: + - kind: ServiceAccount +- name: {{ include "jupyterhub.hub.fullname" . }} ++ name: {{ include "jupyterhub.hub-serviceaccount.fullname" . 
}} + namespace: "{{ .Release.Namespace }}" + roleRef: + kind: Role +diff --git a/applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml b/applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml +new file mode 100644 +index 0000000..817ed66 +--- /dev/null ++++ b/applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml +@@ -0,0 +1,12 @@ ++{{- if .Values.apps.jupyterhub.hub.serviceAccount.create -}} ++apiVersion: v1 ++kind: ServiceAccount ++metadata: ++ name: {{ include "jupyterhub.hub-serviceaccount.fullname" . }} ++ {{- with .Values.apps.jupyterhub.hub.serviceAccount.annotations }} ++ annotations: ++ {{- . | toYaml | nindent 4 }} ++ {{- end }} ++ labels: ++ {{- include "jupyterhub.labels" . | nindent 4 }} ++{{- end }} +diff --git a/applications/jupyterhub/deploy/templates/image-pull-secret.yaml b/applications/jupyterhub/deploy/templates/image-pull-secret.yaml +new file mode 100644 +index 0000000..b7544db +--- /dev/null ++++ b/applications/jupyterhub/deploy/templates/image-pull-secret.yaml +@@ -0,0 +1,15 @@ ++{{- if .Values.apps.jupyterhub.imagePullSecret.create }} ++kind: Secret ++apiVersion: v1 ++metadata: ++ name: {{ include "jupyterhub.image-pull-secret.fullname" . }} ++ labels: ++ {{- include "jupyterhub.labels" . | nindent 4 }} ++ annotations: ++ "helm.sh/hook": pre-install,pre-upgrade ++ "helm.sh/hook-delete-policy": before-hook-creation ++ "helm.sh/hook-weight": "-20" ++type: kubernetes.io/dockerconfigjson ++data: ++ .dockerconfigjson: {{ include "jupyterhub.dockerconfigjson" . }} ++{{- end }} +diff --git a/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl b/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl +index e16fd1a..528345c 100644 +--- a/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl ++++ b/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl +@@ -34,6 +34,9 @@ spec: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 100% ++ {{- if typeIs "int" .Values.apps.jupyterhub.prePuller.revisionHistoryLimit }} ++ revisionHistoryLimit: {{ .Values.apps.jupyterhub.prePuller.revisionHistoryLimit }} ++ {{- end }} + template: + metadata: + labels: +@@ -44,13 +47,17 @@ spec: + {{- end }} + spec: + {{- /* +- continuous-image-puller pods are made evictable to save on the k8s pods +- per node limit all k8s clusters have. ++ image-puller pods are made evictable to save on the k8s pods ++ per node limit all k8s clusters have and have a higher priority ++ than user-placeholder pods that could block an entire node. + */}} +- {{- if and (not .hook) .Values.apps.jupyterhub.scheduling.podPriority.enabled }} +- priorityClassName: {{ include "jupyterhub.user-placeholder-priority.fullname" . }} ++ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }} ++ priorityClassName: {{ include "jupyterhub.image-puller-priority.fullname" . }} ++ {{- end }} ++ {{- with .Values.apps.jupyterhub.singleuser.nodeSelector }} ++ nodeSelector: ++ {{- . | toYaml | nindent 8 }} + {{- end }} +- nodeSelector: {{ toJson .Values.apps.jupyterhub.singleuser.nodeSelector }} + {{- with concat .Values.apps.jupyterhub.scheduling.userPods.tolerations .Values.apps.jupyterhub.singleuser.extraTolerations .Values.apps.jupyterhub.prePuller.extraTolerations }} + tolerations: + {{- . 
| toYaml | nindent 8 }} +@@ -127,6 +134,7 @@ spec: + {{- /* --- Conditionally pull profileList images --- */}} + {{- if .Values.apps.jupyterhub.prePuller.pullProfileListImages }} + {{- range $k, $container := .Values.apps.jupyterhub.singleuser.profileList }} ++ {{- /* profile's kubespawner_override */}} + {{- if $container.kubespawner_override }} + {{- if $container.kubespawner_override.image }} + - name: image-pull-singleuser-profilelist-{{ $k }} +@@ -145,13 +153,15 @@ spec: + {{- end }} + {{- end }} + {{- end }} +- {{- end }} +- {{- end }} +- +- {{- /* --- Pull extra images --- */}} +- {{- range $k, $v := .Values.apps.jupyterhub.prePuller.extraImages }} +- - name: image-pull-{{ $k }} +- image: {{ $v.name }}:{{ $v.tag }} ++ {{- /* kubespawner_override in profile's profile_options */}} ++ {{- if $container.profile_options }} ++ {{- range $option, $option_spec := $container.profile_options }} ++ {{- if $option_spec.choices }} ++ {{- range $choice, $choice_spec := $option_spec.choices }} ++ {{- if $choice_spec.kubespawner_override }} ++ {{- if $choice_spec.kubespawner_override.image }} ++ - name: image-pull-profile-{{ $k }}-option-{{ $option }}-{{ $choice }} ++ image: {{ $choice_spec.kubespawner_override.image }} + command: + - /bin/sh + - -c +@@ -163,13 +173,20 @@ spec: + {{- with $.Values.apps.jupyterhub.prePuller.containerSecurityContext }} + securityContext: + {{- . | toYaml | nindent 12 }} +- {{- end }} ++ {{- end }} ++ {{- end }} ++ {{- end }} ++ {{- end }} ++ {{- end }} ++ {{- end }} ++ {{- end }} ++ {{- end }} + {{- end }} + +- {{- /* --- Pull CloudHarness tasks images --- */}} +- {{- range $k, $v := ( index .Values "task-images" ) }} +- - name: image-pull-{{ $k | replace "-" "" }} +- image: {{ $v }} ++ {{- /* --- Pull extra images --- */}} ++ {{- range $k, $v := .Values.apps.jupyterhub.prePuller.extraImages }} ++ - name: image-pull-{{ $k }} ++ image: {{ $v.name }}:{{ $v.tag }} + command: + - /bin/sh + - -c +diff --git a/applications/jupyterhub/deploy/templates/image-puller/job.yaml b/applications/jupyterhub/deploy/templates/image-puller/job.yaml +index bdd9f63..cc6db3e 100755 +--- a/applications/jupyterhub/deploy/templates/image-puller/job.yaml ++++ b/applications/jupyterhub/deploy/templates/image-puller/job.yaml +@@ -28,16 +28,22 @@ spec: + labels: + {{- /* Changes here will cause the Job to restart the pods. */}} + {{- include "jupyterhub.matchLabels" . | nindent 8 }} ++ {{- with .Values.apps.jupyterhub.prePuller.labels }} ++ {{- . | toYaml | nindent 8 }} ++ {{- end }} + {{- with .Values.apps.jupyterhub.prePuller.annotations }} + annotations: + {{- . | toYaml | nindent 8 }} + {{- end }} + spec: + restartPolicy: Never +- {{- if .Values.apps.jupyterhub.rbac.enabled }} +- serviceAccountName: {{ include "jupyterhub.hook-image-awaiter.fullname" . }} ++ {{- with include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }} ++ serviceAccountName: {{ . }} ++ {{- end }} ++ {{- with .Values.apps.jupyterhub.prePuller.hook.nodeSelector }} ++ nodeSelector: ++ {{- . | toYaml | nindent 8 }} + {{- end }} +- nodeSelector: {{ toJson .Values.apps.jupyterhub.prePuller.hook.nodeSelector }} + {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.prePuller.hook.tolerations }} + tolerations: + {{- . | toYaml | nindent 8 }} +@@ -58,6 +64,7 @@ spec: + - -api-server-address=https://kubernetes.default.svc:$(KUBERNETES_SERVICE_PORT) + - -namespace={{ .Release.Namespace }} + - -daemonset={{ include "jupyterhub.hook-image-puller.fullname" . 
}} ++ - -pod-scheduling-wait-duration={{ .Values.apps.jupyterhub.prePuller.hook.podSchedulingWaitDuration }} + {{- with .Values.apps.jupyterhub.prePuller.hook.containerSecurityContext }} + securityContext: + {{- . | toYaml | nindent 12 }} +diff --git a/applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml b/applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml +new file mode 100644 +index 0000000..1a3fca3 +--- /dev/null ++++ b/applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml +@@ -0,0 +1,18 @@ ++{{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }} ++{{- if or .Values.apps.jupyterhub.prePuller.hook.enabled .Values.apps.jupyterhub.prePuller.continuous.enabled -}} ++apiVersion: scheduling.k8s.io/v1 ++kind: PriorityClass ++metadata: ++ name: {{ include "jupyterhub.image-puller-priority.fullname" . }} ++ annotations: ++ meta.helm.sh/release-name: "{{ .Release.Name }}" ++ meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" ++ labels: ++ {{- include "jupyterhub.labels" . | nindent 4 }} ++value: {{ .Values.apps.jupyterhub.scheduling.podPriority.imagePullerPriority }} ++globalDefault: false ++description: >- ++ Enables [hook|continuous]-image-puller pods to fit on nodes even though they ++ are clogged by user-placeholder pods, while not evicting normal user pods. ++{{- end }} ++{{- end }} +diff --git a/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml b/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml +index 95c86dd..5946896 100755 +--- a/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml ++++ b/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml +@@ -1,29 +1,8 @@ + {{- /* + Permissions to be used by the hook-image-awaiter job + */}} +-{{- if .Values.apps.jupyterhub.rbac.enabled }} +-{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) }} +-{{- /* +-This service account... +-*/ -}} +-apiVersion: v1 +-kind: ServiceAccount +-metadata: +- name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }} +- labels: +- {{- include "jupyterhub.labels" . | nindent 4 }} +- hub.jupyter.org/deletable: "true" +- annotations: +- "helm.sh/hook": pre-install,pre-upgrade +- "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +- "helm.sh/hook-weight": "0" +- {{- with .Values.apps.jupyterhub.prePuller.hook.serviceAccount.annotations }} +- {{- . | toYaml | nindent 4 }} +- {{- end }} +---- +-{{- /* +-... will be used by this role... +-*/}} ++{{- if .Values.apps.jupyterhub.rbac.create -}} ++{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}} + kind: Role + apiVersion: rbac.authorization.k8s.io/v1 + metadata: +@@ -56,7 +35,7 @@ metadata: + "helm.sh/hook-weight": "0" + subjects: + - kind: ServiceAccount +- name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }} ++ name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }} + namespace: "{{ .Release.Namespace }}" + roleRef: + kind: Role +diff --git a/applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml b/applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml +new file mode 100644 +index 0000000..2e5fa72 +--- /dev/null ++++ b/applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml +@@ -0,0 +1,21 @@ ++{{- /* ++ServiceAccount for the pre-puller hook's image-awaiter-job ++*/}} ++{{- if .Values.apps.jupyterhub.prePuller.hook.serviceAccount.create -}} ++{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) 
-}} ++apiVersion: v1 ++kind: ServiceAccount ++metadata: ++ name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }} ++ labels: ++ {{- include "jupyterhub.labels" . | nindent 4 }} ++ hub.jupyter.org/deletable: "true" ++ annotations: ++ "helm.sh/hook": pre-install,pre-upgrade ++ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded ++ "helm.sh/hook-weight": "0" ++ {{- with .Values.apps.jupyterhub.prePuller.hook.serviceAccount.annotations }} ++ {{- . | toYaml | nindent 4 }} ++ {{- end }} ++{{- end }} ++{{- end }} +diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt b/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt +deleted file mode 100755 +index 08bd7bb..0000000 +--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt ++++ /dev/null +@@ -1,9 +0,0 @@ +-# Automatic HTTPS Terminator +- +-This directory has Kubernetes objects for automatic Let's Encrypt Support. +-When enabled, we create a new deployment object that has an nginx-ingress +-and kube-lego container in it. This is responsible for requesting, +-storing and renewing certificates as needed from Let's Encrypt. +- +-The only change required outside of this directory is in the `proxy-public` +-service, which targets different hubs based on automatic HTTPS status. +\ No newline at end of file +diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml +deleted file mode 100755 +index 8d71a97..0000000 +--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml ++++ /dev/null +@@ -1,28 +0,0 @@ +-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }} +-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }} +-{{- if $autoHTTPS -}} +-{{- $_ := .Values.apps.jupyterhub.proxy.https.letsencrypt.contactEmail | required "proxy.https.letsencrypt.contactEmail is a required field" -}} +- +-# This configmap contains Traefik configuration files to be mounted. +-# - traefik.yaml will only be read during startup (static configuration) +-# - dynamic.yaml will be read on change (dynamic configuration) +-# +-# ref: https://docs.traefik.io/getting-started/configuration-overview/ +-# +-# The configuration files are first rendered with Helm templating to large YAML +-# strings. Then we use the fromYAML function on these strings to get an object, +-# that we in turn merge with user provided extra configuration. +-# +-kind: ConfigMap +-apiVersion: v1 +-metadata: +- name: {{ include "jupyterhub.autohttps.fullname" . }} +- labels: +- {{- include "jupyterhub.labels" . | nindent 4 }} +-data: +- traefik.yaml: | +- {{- include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.apps.jupyterhub.proxy.traefik.extraStaticConfig | toYaml | nindent 4 }} +- dynamic.yaml: | +- {{- include "jupyterhub.dynamic.yaml" . 
| fromYaml | merge .Values.apps.jupyterhub.proxy.traefik.extraDynamicConfig | toYaml | nindent 4 }} +- +-{{- end }} +diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml +deleted file mode 100755 +index fcb062f..0000000 +--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml ++++ /dev/null +@@ -1,141 +0,0 @@ +-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }} +-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }} +-{{- if $autoHTTPS -}} +-apiVersion: apps/v1 +-kind: Deployment +-metadata: +- name: {{ include "jupyterhub.autohttps.fullname" . }} +- labels: +- {{- include "jupyterhub.labels" . | nindent 4 }} +-spec: +- replicas: 1 +- selector: +- matchLabels: +- {{- include "jupyterhub.matchLabels" . | nindent 6 }} +- template: +- metadata: +- labels: +- {{- include "jupyterhub.matchLabels" . | nindent 8 }} +- hub.jupyter.org/network-access-proxy-http: "true" +- {{- with .Values.apps.jupyterhub.proxy.traefik.labels }} +- {{- . | toYaml | nindent 8 }} +- {{- end }} +- annotations: +- # Only force a restart through a change to this checksum when the static +- # configuration is changed, as the dynamic can be updated after start. +- # Any disruptions to this deployment impacts everything, it is the +- # entrypoint of all network traffic. +- checksum/static-config: {{ include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.apps.jupyterhub.proxy.traefik.extraStaticConfig | toYaml | sha256sum }} +- spec: +- {{- if .Values.apps.jupyterhub.rbac.enabled }} +- serviceAccountName: {{ include "jupyterhub.autohttps.fullname" . }} +- {{- end }} +- {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }} +- priorityClassName: {{ include "jupyterhub.priority.fullname" . }} +- {{- end }} +- nodeSelector: {{ toJson .Values.apps.jupyterhub.proxy.traefik.nodeSelector }} +- {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.proxy.traefik.tolerations }} +- tolerations: +- {{- . | toYaml | nindent 8 }} +- {{- end }} +- {{- include "jupyterhub.coreAffinity" . | nindent 6 }} +- volumes: +- - name: certificates +- emptyDir: {} +- - name: traefik-config +- configMap: +- name: {{ include "jupyterhub.autohttps.fullname" . }} +- {{- with .Values.apps.jupyterhub.proxy.traefik.extraVolumes }} +- {{- . | toYaml | nindent 8 }} +- {{- end }} +- {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.apps.jupyterhub.proxy.traefik.image) }} +- imagePullSecrets: {{ . }} +- {{- end }} +- initContainers: +- - name: load-acme +- image: "{{ .Values.apps.jupyterhub.proxy.secretSync.image.name }}:{{ .Values.apps.jupyterhub.proxy.secretSync.image.tag }}" +- {{- with .Values.apps.jupyterhub.proxy.secretSync.image.pullPolicy }} +- imagePullPolicy: {{ . }} +- {{- end }} +- args: +- - load +- - {{ include "jupyterhub.proxy-public-tls.fullname" . }} +- - acme.json +- - /etc/acme/acme.json +- env: +- # We need this to get logs immediately +- - name: PYTHONUNBUFFERED +- value: "True" +- {{- with .Values.apps.jupyterhub.proxy.traefik.extraEnv }} +- {{- include "jupyterhub.extraEnv" . | nindent 12 }} +- {{- end }} +- volumeMounts: +- - name: certificates +- mountPath: /etc/acme +- {{- with .Values.apps.jupyterhub.proxy.secretSync.containerSecurityContext }} +- securityContext: +- {{- . 
| toYaml | nindent 12 }} +- {{- end }} +- containers: +- - name: traefik +- image: "{{ .Values.apps.jupyterhub.proxy.traefik.image.name }}:{{ .Values.apps.jupyterhub.proxy.traefik.image.tag }}" +- {{- with .Values.apps.jupyterhub.proxy.traefik.image.pullPolicy }} +- imagePullPolicy: {{ . }} +- {{- end }} +- {{- with .Values.apps.jupyterhub.proxy.traefik.resources }} +- resources: +- {{- . | toYaml | nindent 12 }} +- {{- end }} +- ports: +- - name: http +- containerPort: 8080 +- - name: https +- containerPort: 8443 +- {{- with .Values.apps.jupyterhub.proxy.traefik.extraPorts }} +- {{- . | toYaml | nindent 12 }} +- {{- end }} +- volumeMounts: +- - name: traefik-config +- mountPath: /etc/traefik +- - name: certificates +- mountPath: /etc/acme +- {{- with .Values.apps.jupyterhub.proxy.traefik.extraVolumeMounts }} +- {{- . | toYaml | nindent 12 }} +- {{- end }} +- {{- with .Values.apps.jupyterhub.proxy.traefik.extraEnv }} +- env: +- {{- include "jupyterhub.extraEnv" . | nindent 12 }} +- {{- end }} +- {{- with .Values.apps.jupyterhub.proxy.traefik.containerSecurityContext }} +- securityContext: +- {{- . | toYaml | nindent 12 }} +- {{- end }} +- - name: secret-sync +- image: "{{ .Values.apps.jupyterhub.proxy.secretSync.image.name }}:{{ .Values.apps.jupyterhub.proxy.secretSync.image.tag }}" +- {{- with .Values.apps.jupyterhub.proxy.secretSync.image.pullPolicy }} +- imagePullPolicy: {{ . }} +- {{- end }} +- args: +- - watch-save +- - --label=app={{ include "jupyterhub.appLabel" . }} +- - --label=release={{ .Release.Name }} +- - --label=chart={{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} +- - --label=heritage=secret-sync +- - {{ include "jupyterhub.proxy-public-tls.fullname" . }} +- - acme.json +- - /etc/acme/acme.json +- env: +- # We need this to get logs immediately +- - name: PYTHONUNBUFFERED +- value: "True" +- volumeMounts: +- - name: certificates +- mountPath: /etc/acme +- {{- with .Values.apps.jupyterhub.proxy.secretSync.containerSecurityContext }} +- securityContext: +- {{- . | toYaml | nindent 12 }} +- {{- end }} +- {{- with .Values.apps.jupyterhub.proxy.traefik.extraPodSpec }} +- {{- . | toYaml | nindent 6 }} +- {{- end }} +-{{- end }} +diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml +deleted file mode 100755 +index ea43b67..0000000 +--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml ++++ /dev/null +@@ -1,40 +0,0 @@ +-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }} +-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }} +-{{- if (and $autoHTTPS .Values.apps.jupyterhub.rbac.enabled) -}} +-apiVersion: rbac.authorization.k8s.io/v1 +-kind: Role +-metadata: +- name: {{ include "jupyterhub.autohttps.fullname" . }} +- labels: +- {{- include "jupyterhub.labels" . | nindent 4 }} +- {{- with .Values.apps.jupyterhub.proxy.traefik.serviceAccount.annotations }} +- annotations: +- {{- . | toYaml | nindent 4 }} +- {{- end }} +-rules: +-- apiGroups: [""] +- resources: ["secrets"] +- verbs: ["get", "patch", "list", "create"] +---- +-apiVersion: rbac.authorization.k8s.io/v1 +-kind: RoleBinding +-metadata: +- name: {{ include "jupyterhub.autohttps.fullname" . }} +- labels: +- {{- include "jupyterhub.labels" . | nindent 4 }} +-subjects: +-- kind: ServiceAccount +- name: {{ include "jupyterhub.autohttps.fullname" . 
}} +- apiGroup: +-roleRef: +- kind: Role +- name: {{ include "jupyterhub.autohttps.fullname" . }} +- apiGroup: rbac.authorization.k8s.io +---- +-apiVersion: v1 +-kind: ServiceAccount +-metadata: +- name: {{ include "jupyterhub.autohttps.fullname" . }} +- labels: +- {{- include "jupyterhub.labels" . | nindent 4 }} +-{{- end }} +diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml +deleted file mode 100755 +index d57c135..0000000 +--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml ++++ /dev/null +@@ -1,25 +0,0 @@ +-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }} +-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }} +-{{- if $autoHTTPS -}} +-apiVersion: v1 +-kind: Service +-metadata: +- name: {{ include "jupyterhub.proxy-http.fullname" . }} +- labels: +- {{- include "jupyterhub.labels" . | nindent 4 }} +- {{- with .Values.apps.jupyterhub.proxy.service.labels }} +- {{- . | toYaml | nindent 4 }} +- {{- end }} +- {{- with .Values.apps.jupyterhub.proxy.service.annotations }} +- annotations: +- {{- . | toYaml | nindent 4 }} +- {{- end }} +-spec: +- type: ClusterIP +- selector: +- {{- $_ := merge (dict "componentLabel" "proxy") . }} +- {{- include "jupyterhub.matchLabels" $_ | nindent 4 }} +- ports: +- - port: 8000 +- targetPort: http +-{{- end }} +diff --git a/applications/jupyterhub/deploy/templates/proxy/deployment.yaml b/applications/jupyterhub/deploy/templates/proxy/deployment.yaml +index 6d63ba8..bb37b8f 100755 +--- a/applications/jupyterhub/deploy/templates/proxy/deployment.yaml ++++ b/applications/jupyterhub/deploy/templates/proxy/deployment.yaml +@@ -7,6 +7,9 @@ metadata: + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} + spec: ++ {{- if typeIs "int" .Values.apps.jupyterhub.proxy.chp.revisionHistoryLimit }} ++ revisionHistoryLimit: {{ .Values.apps.jupyterhub.proxy.chp.revisionHistoryLimit }} ++ {{- end }} + replicas: 1 + selector: + matchLabels: +@@ -35,7 +38,7 @@ spec: + # match the k8s Secret during the first upgrade following an auth_token + # was generated. + checksum/auth-token: {{ include "jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token" . | sha256sum | trunc 4 | quote }} +- checksum/proxy-secret: {{ include (print $.Template.BasePath "/jupyterhub/hub/secret.yaml") . | sha256sum }} ++ checksum/proxy-secret: {{ include (print $.Template.BasePath "/jupyterhub/proxy/secret.yaml") . | sha256sum | quote }} + {{- with .Values.apps.jupyterhub.proxy.annotations }} + {{- . | toYaml | nindent 8 }} + {{- end }} +@@ -44,7 +47,10 @@ spec: + {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }} + priorityClassName: {{ include "jupyterhub.priority.fullname" . }} + {{- end }} +- nodeSelector: {{ toJson .Values.apps.jupyterhub.proxy.chp.nodeSelector }} ++ {{- with .Values.apps.jupyterhub.proxy.chp.nodeSelector }} ++ nodeSelector: ++ {{- . | toYaml | nindent 8 }} ++ {{- end }} + {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.proxy.chp.tolerations }} + tolerations: + {{- . 
| toYaml | nindent 8 }} +@@ -135,6 +141,8 @@ spec: + livenessProbe: + initialDelaySeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.periodSeconds }} ++ timeoutSeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.timeoutSeconds }} ++ failureThreshold: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.failureThreshold }} + httpGet: + path: /_chp_healthz + {{- if or $manualHTTPS $manualHTTPSwithsecret }} +@@ -149,6 +157,8 @@ spec: + readinessProbe: + initialDelaySeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.periodSeconds }} ++ timeoutSeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.timeoutSeconds }} ++ failureThreshold: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.failureThreshold }} + httpGet: + path: /_chp_healthz + {{- if or $manualHTTPS $manualHTTPSwithsecret }} +diff --git a/applications/jupyterhub/deploy/templates/proxy/netpol.yaml b/applications/jupyterhub/deploy/templates/proxy/netpol.yaml +index adc8277..88a00be 100755 +--- a/applications/jupyterhub/deploy/templates/proxy/netpol.yaml ++++ b/applications/jupyterhub/deploy/templates/proxy/netpol.yaml +@@ -85,32 +85,24 @@ spec: + + egress: + # proxy --> hub +- - ports: +- - port: 8081 +- to: ++ - to: + - podSelector: + matchLabels: + {{- $_ := merge (dict "componentLabel" "hub") . }} + {{- include "jupyterhub.matchLabels" $_ | nindent 14 }} ++ ports: ++ - port: 8081 + + # proxy --> singleuser-server +- - ports: +- - port: 8888 +- to: ++ - to: + - podSelector: + matchLabels: + {{- $_ := merge (dict "componentLabel" "singleuser-server") . }} + {{- include "jupyterhub.matchLabels" $_ | nindent 14 }} ++ ports: ++ - port: 8888 + +- # proxy --> Kubernetes internal DNS +- - ports: +- - protocol: UDP +- port: 53 +- - protocol: TCP +- port: 53 +- +- {{- with .Values.apps.jupyterhub.proxy.chp.networkPolicy.egress }} +- # proxy --> depends, but the default is everything +- {{- . | toYaml | nindent 4 }} ++ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.proxy.chp.networkPolicy)) }} ++ {{- . | nindent 4 }} + {{- end }} + {{- end }} +diff --git a/applications/jupyterhub/deploy/templates/proxy/pdb.yaml b/applications/jupyterhub/deploy/templates/proxy/pdb.yaml +index 1846a3b..155895b 100755 +--- a/applications/jupyterhub/deploy/templates/proxy/pdb.yaml ++++ b/applications/jupyterhub/deploy/templates/proxy/pdb.yaml +@@ -1,9 +1,5 @@ + {{- if .Values.apps.jupyterhub.proxy.chp.pdb.enabled -}} +-{{- if .Capabilities.APIVersions.Has "policy/v1" }} + apiVersion: policy/v1 +-{{- else }} +-apiVersion: policy/v1beta1 +-{{- end }} + kind: PodDisruptionBudget + metadata: + name: {{ include "jupyterhub.proxy.fullname" . }} +diff --git a/applications/jupyterhub/deploy/templates/proxy/service.yaml b/applications/jupyterhub/deploy/templates/proxy/service.yaml +index 0d9ca5b..f634ba9 100755 +--- a/applications/jupyterhub/deploy/templates/proxy/service.yaml ++++ b/applications/jupyterhub/deploy/templates/proxy/service.yaml +@@ -35,12 +35,15 @@ metadata: + {{- end }} + spec: + selector: ++ # This service will target the autohttps pod if autohttps is configured, and ++ # the proxy pod if not. When autohttps is configured, the service proxy-http ++ # will be around to target the proxy pod directly. 
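++    # As an illustrative sketch (the label values here are assumptions; the
++    # exact ones are rendered by the chart's label helpers), the selector
++    # ends up roughly like:
++    #
++    #   app: jupyterhub
++    #   release: <release name>
++    #   component: proxy   # "autohttps" when autohttps is enabled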
+ {{- if $autoHTTPS }} +- component: autohttps ++ {{- $_ := merge (dict "componentLabel" "autohttps") . -}} ++ {{- include "jupyterhub.matchLabels" $_ | nindent 4 }} + {{- else }} +- component: proxy ++ {{- include "jupyterhub.matchLabels" . | nindent 4 }} + {{- end }} +- release: {{ .Release.Name }} + ports: + {{- if $HTTPS }} + - name: https +diff --git a/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml b/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml +index 588cf19..1bed905 100755 +--- a/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml ++++ b/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml +@@ -4,22 +4,9 @@ kind: PriorityClass + metadata: + name: {{ include "jupyterhub.priority.fullname" . }} + annotations: +- # FIXME: PriorityClasses must be added before the other resources reference +- # them, and in the past a workaround was needed to accomplish this: +- # to make the resource a Helm hook. +- # +- # To transition this resource to no longer be a Helm hook resource, +- # we explicitly add ownership annotations/labels (in 1.0.0) which +- # will allow a future upgrade (in 2.0.0) to remove all hook and +- # ownership annotations/labels. +- # +- helm.sh/hook: pre-install,pre-upgrade +- helm.sh/hook-delete-policy: before-hook-creation +- helm.sh/hook-weight: "-100" + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + labels: +- app.kubernetes.io/managed-by: Helm + {{- $_ := merge (dict "componentLabel" "default-priority") . }} + {{- include "jupyterhub.labels" $_ | nindent 4 }} + value: {{ .Values.apps.jupyterhub.scheduling.podPriority.defaultPriority }} +diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml +index b1dc6c5..800ac20 100755 +--- a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml ++++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml +@@ -3,11 +3,7 @@ The cluster autoscaler should be allowed to evict and reschedule these pods if + it would help in order to scale down a node. + */}} + {{- if .Values.apps.jupyterhub.scheduling.userPlaceholder.enabled -}} +-{{- if .Capabilities.APIVersions.Has "policy/v1" }} + apiVersion: policy/v1 +-{{- else }} +-apiVersion: policy/v1beta1 +-{{- end }} + kind: PodDisruptionBudget + metadata: + name: {{ include "jupyterhub.user-placeholder.fullname" . }} +diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml +index e03497d..688e217 100755 +--- a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml ++++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml +@@ -5,22 +5,9 @@ kind: PriorityClass + metadata: + name: {{ include "jupyterhub.user-placeholder-priority.fullname" . }} + annotations: +- # FIXME: PriorityClasses must be added before the other resources reference +- # them, and in the past a workaround was needed to accomplish this: +- # to make the resource a Helm hook. +- # +- # To transition this resource to no longer be a Helm hook resource, +- # we explicitly add ownership annotations/labels (in 1.0.0) which +- # will allow a future upgrade (in 2.0.0) to remove all hook and +- # ownership annotations/labels. 
+- # +- helm.sh/hook: pre-install,pre-upgrade +- helm.sh/hook-delete-policy: before-hook-creation +- helm.sh/hook-weight: "-100" + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + labels: +- app.kubernetes.io/managed-by: Helm + {{- include "jupyterhub.labels" . | nindent 4 }} + value: {{ .Values.apps.jupyterhub.scheduling.podPriority.userPlaceholderPriority }} + globalDefault: false +diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml +index 114f626..c243bee 100755 +--- a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml ++++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml +@@ -16,6 +16,9 @@ metadata: + {{- include "jupyterhub.labels" . | nindent 4 }} + spec: + podManagementPolicy: Parallel ++ {{- if typeIs "int" .Values.apps.jupyterhub.scheduling.userPlaceholder.revisionHistoryLimit }} ++ revisionHistoryLimit: {{ .Values.apps.jupyterhub.scheduling.userPlaceholder.revisionHistoryLimit }} ++ {{- end }} + replicas: {{ .Values.apps.jupyterhub.scheduling.userPlaceholder.replicas }} + selector: + matchLabels: +@@ -23,9 +26,16 @@ spec: + serviceName: {{ include "jupyterhub.user-placeholder.fullname" . }} + template: + metadata: ++ {{- with .Values.apps.jupyterhub.scheduling.userPlaceholder.annotations }} ++ annotations: ++ {{- . | toYaml | nindent 8 }} ++ {{- end }} + labels: + {{- /* Changes here will cause the Deployment to restart the pods. */}} + {{- include "jupyterhub.matchLabels" . | nindent 8 }} ++ {{- with .Values.apps.jupyterhub.scheduling.userPlaceholder.labels }} ++ {{- . | toYaml | nindent 8 }} ++ {{- end }} + spec: + {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }} + priorityClassName: {{ include "jupyterhub.user-placeholder-priority.fullname" . }} +@@ -33,7 +43,10 @@ spec: + {{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled }} + schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }} + {{- end }} +- nodeSelector: {{ toJson .Values.apps.jupyterhub.singleuser.nodeSelector }} ++ {{- with .Values.apps.jupyterhub.singleuser.nodeSelector }} ++ nodeSelector: ++ {{- . | toYaml | nindent 8 }} ++ {{- end }} + {{- with concat .Values.apps.jupyterhub.scheduling.userPods.tolerations .Values.apps.jupyterhub.singleuser.extraTolerations }} + tolerations: + {{- . | toYaml | nindent 8 }} +diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml +index ef8a37f..3e83b44 100755 +--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml ++++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml +@@ -6,16 +6,28 @@ metadata: + labels: + {{- include "jupyterhub.labels" . | nindent 4 }} + data: +- # ref: https://kubernetes.io/docs/reference/scheduling/config/ ++ {{- /* ++ This is configuration of a k8s official kube-scheduler binary running in the ++ user-scheduler. 
++
++    ref: https://kubernetes.io/docs/reference/scheduling/config/
++    ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1/
++    */}}
+   config.yaml: |
+-    apiVersion: kubescheduler.config.k8s.io/v1beta1
++    apiVersion: kubescheduler.config.k8s.io/v1
+     kind: KubeSchedulerConfiguration
+     leaderElection:
+-      resourceLock: endpoints
++      resourceLock: leases
+       resourceName: {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
+       resourceNamespace: "{{ .Release.Namespace }}"
+     profiles:
+       - schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
++        {{- with .Values.apps.jupyterhub.scheduling.userScheduler.plugins }}
+         plugins:
+-          {{- .Values.apps.jupyterhub.scheduling.userScheduler.plugins | toYaml | nindent 10 }}
++          {{- . | toYaml | nindent 10 }}
++        {{- end }}
++        {{- with .Values.apps.jupyterhub.scheduling.userScheduler.pluginConfig }}
++        pluginConfig:
++          {{- . | toYaml | nindent 10 }}
++        {{- end }}
+ {{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
+index 1bcaf31..f22d0de 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
+@@ -6,6 +6,9 @@ metadata:
+   labels:
+     {{- include "jupyterhub.labels" . | nindent 4 }}
+ spec:
++  {{- if typeIs "int" .Values.apps.jupyterhub.scheduling.userScheduler.revisionHistoryLimit }}
++  revisionHistoryLimit: {{ .Values.apps.jupyterhub.scheduling.userScheduler.revisionHistoryLimit }}
++  {{- end }}
+   replicas: {{ .Values.apps.jupyterhub.scheduling.userScheduler.replicas }}
+   selector:
+     matchLabels:
+@@ -14,16 +17,25 @@ spec:
+     metadata:
+       labels:
+         {{- include "jupyterhub.matchLabels" . | nindent 8 }}
++        {{- with .Values.apps.jupyterhub.scheduling.userScheduler.labels }}
++        {{- . | toYaml | nindent 8 }}
++        {{- end }}
+       annotations:
+         checksum/config-map: {{ include (print $.Template.BasePath "/jupyterhub/scheduling/user-scheduler/configmap.yaml") . | sha256sum }}
++        {{- with .Values.apps.jupyterhub.scheduling.userScheduler.annotations }}
++        {{- . | toYaml | nindent 8 }}
++        {{- end }}
+     spec:
+-      {{- if .Values.apps.jupyterhub.rbac.enabled }}
+-      serviceAccountName: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
++      {{- with include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
++      serviceAccountName: {{ . }}
+       {{- end }}
+       {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+       priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
+       {{- end }}
+-      nodeSelector: {{ toJson .Values.apps.jupyterhub.scheduling.userScheduler.nodeSelector }}
++      {{- with .Values.apps.jupyterhub.scheduling.userScheduler.nodeSelector }}
++      nodeSelector:
++        {{- . | toYaml | nindent 8 }}
++      {{- end }}
+       {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.scheduling.userScheduler.tolerations }}
+       tolerations:
+         {{- . | toYaml | nindent 8 }}
+@@ -44,13 +56,6 @@ spec:
+       {{- end }}
+       command:
+         - /usr/local/bin/kube-scheduler
+-        # NOTE: --leader-elect-... (new) and --lock-object-... (deprecated)
+-        #       flags are silently ignored in favor of whats defined in the
+-        #       passed KubeSchedulerConfiguration whenever --config is
+-        #       passed.
+- # +- # ref: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/ +- # + # NOTE: --authentication-skip-lookup=true is used to avoid a + # seemingly harmless error, if we need to not skip + # "authentication lookup" in the future, see the linked issue. +@@ -65,12 +70,14 @@ spec: + livenessProbe: + httpGet: + path: /healthz +- port: 10251 ++ scheme: HTTPS ++ port: 10259 + initialDelaySeconds: 15 + readinessProbe: + httpGet: + path: /healthz +- port: 10251 ++ scheme: HTTPS ++ port: 10259 + {{- with .Values.apps.jupyterhub.scheduling.userScheduler.resources }} + resources: + {{- . | toYaml | nindent 12 }} +diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml +index 04f2af8..2c9c6de 100755 +--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml ++++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml +@@ -1,9 +1,5 @@ + {{- if and .Values.apps.jupyterhub.scheduling.userScheduler.enabled .Values.apps.jupyterhub.scheduling.userScheduler.pdb.enabled -}} +-{{- if .Capabilities.APIVersions.Has "policy/v1" }} + apiVersion: policy/v1 +-{{- else }} +-apiVersion: policy/v1beta1 +-{{- end }} + kind: PodDisruptionBudget + metadata: + name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }} +diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml +index 083e065..9c7fab7 100755 +--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml ++++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml +@@ -1,16 +1,5 @@ + {{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled -}} +-{{- if .Values.apps.jupyterhub.rbac.enabled }} +-apiVersion: v1 +-kind: ServiceAccount +-metadata: +- name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }} +- labels: +- {{- include "jupyterhub.labels" . | nindent 4 }} +- {{- with .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.annotations }} +- annotations: +- {{- . | toYaml | nindent 4 }} +- {{- end }} +---- ++{{- if .Values.apps.jupyterhub.rbac.create -}} + kind: ClusterRole + apiVersion: rbac.authorization.k8s.io/v1 + metadata: +@@ -19,13 +8,23 @@ metadata: + {{- include "jupyterhub.labels" . | nindent 4 }} + rules: + # Copied from the system:kube-scheduler ClusterRole of the k8s version +- # matching the kube-scheduler binary we use. A modification of two resource +- # name references from kube-scheduler to user-scheduler-lock was made. ++ # matching the kube-scheduler binary we use. A modification has been made to ++ # resourceName fields to remain relevant for how we have named our resources ++ # in this Helm chart. + # +- # NOTE: These rules have been unchanged between 1.12 and 1.15, then changed in +- # 1.16 and in 1.17, but unchanged in 1.18 and 1.19. ++ # NOTE: These rules have been: ++ # - unchanged between 1.12 and 1.15 ++ # - changed in 1.16 ++ # - changed in 1.17 ++ # - unchanged between 1.18 and 1.20 ++ # - changed in 1.21: get/list/watch permission for namespace, ++ # csidrivers, csistoragecapacities was added. 
++ # - unchanged between 1.22 and 1.27 ++ # - changed in 1.28: permissions to get/update lock endpoint resource ++ # removed ++ # - unchanged between 1.28 and 1.29 + # +- # ref: https://github.com/kubernetes/kubernetes/blob/v1.19.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L696-L829 ++ # ref: https://github.com/kubernetes/kubernetes/blob/v1.29.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L721-L862 + - apiGroups: + - "" + - events.k8s.io +@@ -50,21 +49,6 @@ rules: + verbs: + - get + - update +- - apiGroups: +- - "" +- resources: +- - endpoints +- verbs: +- - create +- - apiGroups: +- - "" +- resourceNames: +- - {{ include "jupyterhub.user-scheduler-lock.fullname" . }} +- resources: +- - endpoints +- verbs: +- - get +- - update + - apiGroups: + - "" + resources: +@@ -159,13 +143,37 @@ rules: + - get + - list + - watch ++ - apiGroups: ++ - "" ++ resources: ++ - namespaces ++ verbs: ++ - get ++ - list ++ - watch ++ - apiGroups: ++ - storage.k8s.io ++ resources: ++ - csidrivers ++ verbs: ++ - get ++ - list ++ - watch ++ - apiGroups: ++ - storage.k8s.io ++ resources: ++ - csistoragecapacities ++ verbs: ++ - get ++ - list ++ - watch + + # Copied from the system:volume-scheduler ClusterRole of the k8s version + # matching the kube-scheduler binary we use. + # +- # NOTE: These rules have not changed between 1.12 and 1.19. ++ # NOTE: These rules have not changed between 1.12 and 1.29. + # +- # ref: https://github.com/kubernetes/kubernetes/blob/v1.19.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1213-L1240 ++ # ref: https://github.com/kubernetes/kubernetes/blob/v1.29.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1283-L1310 + - apiGroups: + - "" + resources: +@@ -203,7 +211,7 @@ metadata: + {{- include "jupyterhub.labels" . | nindent 4 }} + subjects: + - kind: ServiceAccount +- name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }} ++ name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }} + namespace: "{{ .Release.Namespace }}" + roleRef: + kind: ClusterRole +diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml +new file mode 100644 +index 0000000..67618b0 +--- /dev/null ++++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml +@@ -0,0 +1,14 @@ ++{{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled -}} ++{{- if .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.create -}} ++apiVersion: v1 ++kind: ServiceAccount ++metadata: ++ name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }} ++ labels: ++ {{- include "jupyterhub.labels" . | nindent 4 }} ++ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.annotations }} ++ annotations: ++ {{- . | toYaml | nindent 4 }} ++ {{- end }} ++{{- end }} ++{{- end }} +diff --git a/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml b/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml +index 3dfb137..931a150 100755 +--- a/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml ++++ b/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml +@@ -62,23 +62,38 @@ spec: + + egress: + # singleuser-server --> hub +- - ports: +- - port: 8081 +- to: ++ - to: + - podSelector: + matchLabels: + {{- $_ := merge (dict "componentLabel" "hub") . 
}}
+             {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++      ports:
++        - port: 8081
+ 
+-  # singleuser-server --> Kubernetes internal DNS
+-  - ports:
+-      - protocol: UDP
+-        port: 53
+-      - protocol: TCP
+-        port: 53
++  # singleuser-server --> proxy
++  # singleuser-server --> autohttps
++  #
++  # While not critical for core functionality, user or library code may rely
++  # on communicating with the proxy or autohttps pods via a k8s Service it can
++  # detect from well-known environment variables.
++  #
++  - to:
++      - podSelector:
++          matchLabels:
++            {{- $_ := merge (dict "componentLabel" "proxy") . }}
++            {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++    ports:
++      - port: 8000
++  - to:
++      - podSelector:
++          matchLabels:
++            {{- $_ := merge (dict "componentLabel" "autohttps") . }}
++            {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++    ports:
++      - port: 8080
++      - port: 8443
+ 
+-  {{- with .Values.apps.jupyterhub.singleuser.networkPolicy.egress }}
+-  # singleuser-server --> depends, but the default is everything
+-  {{- . | toYaml | nindent 4 }}
++  {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.singleuser.networkPolicy)) }}
++  {{- . | nindent 4 }}
+   {{- end }}
+ {{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/singleuser/secret.yaml b/applications/jupyterhub/deploy/templates/singleuser/secret.yaml
+new file mode 100644
+index 0000000..e6eab9b
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/singleuser/secret.yaml
+@@ -0,0 +1,17 @@
++{{- if .Values.apps.jupyterhub.singleuser.extraFiles }}
++kind: Secret
++apiVersion: v1
++metadata:
++  name: {{ include "jupyterhub.singleuser.fullname" . }}
++  labels:
++    {{- include "jupyterhub.labels" . | nindent 4 }}
++type: Opaque
++{{- with include "jupyterhub.extraFiles.data" .Values.apps.jupyterhub.singleuser.extraFiles }}
++data:
++  {{- . | nindent 2 }}
++{{- end }}
++{{- with include "jupyterhub.extraFiles.stringData" .Values.apps.jupyterhub.singleuser.extraFiles }}
++stringData:
++  {{- . | nindent 2 }}
++{{- end }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/values.schema.yaml b/applications/jupyterhub/deploy/values.schema.yaml
+new file mode 100644
+index 0000000..69c13a8
+--- /dev/null
++++ b/applications/jupyterhub/deploy/values.schema.yaml
+@@ -0,0 +1,3014 @@
++# This schema (a jsonschema in YAML format) is used to generate
++# values.schema.json which is packaged with the Helm chart for client side
++# validation by helm of values before template rendering.
++#
++# This schema is also used by our documentation system to build the
++# configuration reference section based on the description fields. See
++# docs/source/conf.py for that logic!
++#
++# We look to document everything we have default values for in values.yaml, but
++# we don't look to enforce the perfect validation logic within this file.
++#
++# ref: https://json-schema.org/learn/getting-started-step-by-step.html
++#
++$schema: http://json-schema.org/draft-07/schema#
++type: object
++additionalProperties: false
++required:
++  - imagePullSecrets
++  - hub
++  - proxy
++  - singleuser
++  - ingress
++  - prePuller
++  - custom
++  - cull
++  - debug
++  - rbac
++  - global
++properties:
++  enabled:
++    type: [boolean, "null"]
++    description: |
++      `enabled` is ignored by the jupyterhub chart itself, but a chart depending
++      on the jupyterhub chart can conditionally make use of this config option
++      as the condition.
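++
++      For example, a parent chart could declare the jupyterhub chart as an
++      optional dependency gated on this flag. A minimal sketch (the version
++      shown is illustrative, not prescriptive):
++
++      ```yaml
++      # Chart.yaml of a hypothetical parent chart
++      dependencies:
++        - name: jupyterhub
++          version: "3.2.1"
++          condition: jupyterhub.enabled
++      ```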
++  fullnameOverride:
++    type: [string, "null"]
++    description: |
++      fullnameOverride and nameOverride allow you to adjust how the resources
++      part of the Helm chart are named.
++
++      Name format                | Resource types | fullnameOverride | nameOverride | Note
++      -------------------------- | -------------- | ---------------- | ------------ | -
++      component                  | namespaced     | `""`             | *            | Default
++      release-component          | cluster wide   | `""`             | *            | Default
++      fullname-component         | *              | str              | *            | -
++      release-component          | *              | null             | `""`         | -
++      release-(name-)component   | *              | null             | str          | omitted if contained in release
++      release-(chart-)component  | *              | null             | null         | omitted if contained in release
++
++      ```{admonition} Warning!
++      :class: warning
++      Changing fullnameOverride or nameOverride after the initial installation
++      of the chart isn't supported. Changing their values likely leads to a
++      reset of non-external JupyterHub databases, abandonment of users' storage,
++      and severed couplings to currently running user pods.
++      ```
++
++      If you are a developer of a chart depending on this chart, you should
++      avoid hardcoding names. If you want to reference the name of a resource in
++      this chart from a parent helm chart's template, you can make use of the
++      global named templates instead.
++
++      ```yaml
++      # some pod definition of a parent chart helm template
++      schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
++      ```
++
++      To access them from a container, you can also rely on the hub ConfigMap
++      that contains entries of all the resource names.
++
++      ```yaml
++      # some container definition in a parent chart helm template
++      env:
++        - name: SCHEDULER_NAME
++          valueFrom:
++            configMapKeyRef:
++              name: {{ include "jupyterhub.user-scheduler.fullname" . }}
++              key: user-scheduler
++      ```
++
++  nameOverride:
++    type: [string, "null"]
++    description: |
++      See the documentation under [`fullnameOverride`](schema_fullnameOverride).
++
++  imagePullSecret:
++    type: object
++    required: [create]
++    if:
++      properties:
++        create:
++          const: true
++    then:
++      additionalProperties: false
++      required: [registry, username, password]
++    description: |
++      This is configuration to create a k8s Secret resource of `type:
++      kubernetes.io/dockerconfigjson`, with credentials to pull images from a
++      private image registry. If you opt to do so, it will be available for use
++      by all pods in their respective `spec.imagePullSecrets` alongside other
++      k8s Secrets defined in `imagePullSecrets` or the pod respective
++      `...image.pullSecrets` configuration.
++
++      In other words, using this configuration option can automate both the
++      otherwise manual creation of a k8s Secret and the otherwise manual
++      configuration to reference this k8s Secret in all the pods of the Helm
++      chart.
++
++      ```sh
++      # you won't need to create a k8s Secret manually...
++      kubectl create secret docker-registry image-pull-secret \
++        --docker-server=<REGISTRY> \
++        --docker-username=<USERNAME> \
++        --docker-email=<EMAIL> \
++        --docker-password=<PASSWORD>
++      ```
++
++      If you just want to let all Pods reference an existing secret, use the
++      [`imagePullSecrets`](schema_imagePullSecrets) configuration instead.
++    properties:
++      create:
++        type: boolean
++        description: |
++          Toggle the creation of the k8s Secret with provided credentials to
++          access a private image registry.
++      automaticReferenceInjection:
++        type: boolean
++        description: |
++          Toggle the automatic reference injection of the created Secret to all
++          pods' `spec.imagePullSecrets` configuration.
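++
++          With both `create` and `automaticReferenceInjection` set to true,
++          pods are rendered with a reference to the created Secret, roughly
++          like this sketch (the actual name is derived by the chart's named
++          templates, so it is an assumption here):
++
++          ```yaml
++          spec:
++            imagePullSecrets:
++              - name: image-pull-secret
++            # ...
++          ```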
++      registry:
++        type: string
++        description: |
++          Name of the private registry you want to create a credential set for.
++          It will default to Docker Hub's image registry.
++
++          Examples:
++            - https://index.docker.io/v1/
++            - quay.io
++            - eu.gcr.io
++            - alexmorreale.privatereg.net
++      username:
++        type: string
++        description: |
++          Name of the user you want to use to connect to your private registry.
++
++          For external gcr.io, you will use `_json_key`.
++
++          Examples:
++            - alexmorreale
++            - alex@pfc.com
++            - _json_key
++      password:
++        type: string
++        description: |
++          Password for the private image registry's user.
++
++          Examples:
++            - plaintextpassword
++            - abc123SECRETzyx098
++
++          For gcr.io registries the password will be a big JSON blob for a
++          Google cloud service account, it should look something like below.
++
++          ```yaml
++          password: |-
++            {
++              "type": "service_account",
++              "project_id": "jupyter-se",
++              "private_key_id": "f2ba09118a8d3123b3321bd9a7d6d0d9dc6fdb85",
++              ...
++            }
++          ```
++      email:
++        type: [string, "null"]
++        description: |
++          Specification of an email is most often not required, but it is
++          supported.
++
++  imagePullSecrets:
++    type: array
++    description: |
++      Chart wide configuration to _append_ k8s Secret references to all its
++      pod's `spec.imagePullSecrets` configuration.
++
++      This will not override or get overridden by pod specific configuration,
++      but instead augment the pod specific configuration.
++
++      You can use both the k8s native syntax, where each list element is like
++      `{"name": "my-secret-name"}`, or you can let list elements be strings
++      naming the secrets directly.
++
++  hub:
++    type: object
++    additionalProperties: false
++    required: [baseUrl]
++    properties:
++      revisionHistoryLimit: &revisionHistoryLimit
++        type: [integer, "null"]
++        minimum: 0
++        description: |
++          Configures the resource's `spec.revisionHistoryLimit`. This is
++          available for Deployment, StatefulSet, and DaemonSet resources.
++
++          See the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#revision-history-limit)
++          for more info.
++      config:
++        type: object
++        additionalProperties: true
++        description: |
++          JupyterHub and its components (authenticators, spawners, etc.) are
++          Python classes that expose their configuration through
++          [_traitlets_](https://traitlets.readthedocs.io/en/stable/). With this
++          Helm chart configuration (`hub.config`), you can directly configure
++          the Python classes through _static_ YAML values. To _dynamically_ set
++          values, you need to use [`hub.extraConfig`](schema_hub.extraConfig)
++          instead.
++
++          ```{admonition} Currently intended only for auth config
++          :class: warning
++          This config _currently_ (0.11.0) only influences the software in the
++          `hub` Pod, but some Helm chart config options such as
++          [`hub.baseUrl`](schema_hub.baseUrl) are used to set
++          `JupyterHub.base_url` in the `hub` Pod _and_ influence how other Helm
++          templates are rendered.
++
++          As we have not yet mapped out all the potential configuration
++          conflicts except for the authentication related configuration options,
++          please accept that using it for something else at this point can lead
++          to issues.
++          ```
++
++          __Example__
++
++          If you inspect documentation or some `jupyterhub_config.py` file and
++          find a section like the following:
++
++          ```python
++          c.JupyterHub.admin_access = True
++          c.JupyterHub.admin_users = ["jovyan1", "jovyan2"]
++          c.KubeSpawner.k8s_api_request_timeout = 10
++          c.GitHubOAuthenticator.allowed_organizations = ["jupyterhub"]
++          ```
++
++          Then, you would be able to represent it with this configuration like:
++
++          ```yaml
++          hub:
++            config:
++              JupyterHub:
++                admin_access: true
++                admin_users:
++                  - jovyan1
++                  - jovyan2
++              KubeSpawner:
++                k8s_api_request_timeout: 10
++              GitHubOAuthenticator:
++                allowed_organizations:
++                  - jupyterhub
++          ```
++
++          ```{admonition} YAML limitations
++          :class: tip
++          You can't represent Python `Bytes` or `Set` objects in YAML directly.
++          ```
++
++          ```{admonition} Helm value merging
++          :class: tip
++          `helm` merges a Helm chart's default values with values passed with
++          the `--values` or `-f` flag. During merging, lists are replaced while
++          dictionaries are updated.
++          ```
++      extraFiles: &extraFiles
++        type: object
++        additionalProperties: false
++        description: |
++          A dictionary with extra files to be injected into the pod's container
++          on startup. This can for example be used to inject: configuration
++          files, custom user interface templates, images, and more.
++
++          ```yaml
++          # NOTE: "hub" is used in this example, but the configuration is the
++          #       same for "singleuser".
++          hub:
++            extraFiles:
++              # The file key is just a reference that doesn't influence the
++              # actual file name.
++              <file key>:
++                # mountPath is required and must be the absolute file path.
++                mountPath: <full file path>
++
++                # Choose one out of the three ways to represent the actual file
++                # content: data, stringData, or binaryData.
++                #
++                # data should be set to a mapping (dictionary). It will in the
++                # end be rendered to either YAML, JSON, or TOML based on the
++                # filename extension, which is required to be either .yaml,
++                # .yml, .json, or .toml.
++                #
++                # If your content is YAML, JSON, or TOML, it can make sense to
++                # use data to represent it over stringData as data can be merged
++                # instead of replaced if set partially from separate Helm
++                # configuration files.
++                #
++                # Both stringData and binaryData should be set to a string
++                # representing the content, where binaryData should be the
++                # base64 encoding of the actual file content.
++                #
++                data:
++                  myConfig:
++                    myMap:
++                      number: 123
++                      string: "hi"
++                    myList:
++                      - 1
++                      - 2
++                stringData: |
++                  hello world!
++                binaryData: aGVsbG8gd29ybGQhCg==
++
++                # mode is by default 0644 and you can optionally override it
++                # either by octal notation (example: 0400) or decimal notation
++                # (example: 256).
++                mode: <file system permissions>
++          ```
++
++          **Using --set-file**
++
++          To avoid embedding entire files in the Helm chart configuration, you
++          can use the `--set-file` flag during `helm upgrade` to set the
++          stringData or binaryData field.
++
++          ```yaml
++          hub:
++            extraFiles:
++              my_image:
++                mountPath: /usr/local/share/jupyterhub/static/my_image.png
++
++              # Files in /usr/local/etc/jupyterhub/jupyterhub_config.d are
++              # automatically loaded in alphabetical order of the final file
++              # name when JupyterHub starts.
++              my_config:
++                mountPath: /usr/local/etc/jupyterhub/jupyterhub_config.d/my_jupyterhub_config.py
++          ```
++
++          ```bash
++          # --set-file expects a text based file, so you need to base64 encode
++          # it manually first.
++          base64 my_image.png > my_image.png.b64
++
++          helm upgrade <...> \
++              --set-file hub.extraFiles.my_image.binaryData=./my_image.png.b64 \
++              --set-file hub.extraFiles.my_config.stringData=./my_jupyterhub_config.py
++          ```
++
++          **Common uses**
++
++          1. **JupyterHub template customization**
++
++             You can replace the default JupyterHub user interface templates in
++             the hub pod by injecting new ones to
++             `/usr/local/share/jupyterhub/templates`. These can in turn
++             reference custom images injected to
++             `/usr/local/share/jupyterhub/static`.
++
++          1. **JupyterHub standalone file config**
++
++             Instead of embedding JupyterHub python configuration as a string
++             within a YAML file through
++             [`hub.extraConfig`](schema_hub.extraConfig), you can inject a
++             standalone .py file into
++             `/usr/local/etc/jupyterhub/jupyterhub_config.d` that is
++             automatically loaded.
++
++          1. **Flexible configuration**
++
++             By injecting files, you don't have to embed them in a docker image
++             that you have to rebuild.
++
++             If your configuration file is a YAML/JSON/TOML file, you can also
++             use `data` instead of `stringData`, which allows you to set
++             configuration in separate Helm config files. This can be useful to
++             help dependent charts override only part of the file's
++             configuration, or to allow the configuration to be set through
++             multiple Helm configuration files.
++
++          **Limitations**
++
++          1. File size
++
++             The files in `hub.extraFiles` and `singleuser.extraFiles` are
++             respectively stored in their own k8s Secret resource. As k8s
++             Secrets are typically limited to 1MB, you will be limited to a
++             total file size of less than 1MB, as the base64 encoding that
++             takes place also reduces the available capacity to 75%.
++
++          2. File updates
++
++             The files that are mounted are only set during container startup.
++             This is [because we use
++             `subPath`](https://kubernetes.io/docs/concepts/storage/volumes/#secret)
++             as is required to avoid replacing the content of the entire
++             directory we mount in.
++        patternProperties:
++          ".*":
++            type: object
++            additionalProperties: false
++            required: [mountPath]
++            oneOf:
++              - required: [data]
++              - required: [stringData]
++              - required: [binaryData]
++            properties:
++              mountPath:
++                type: string
++              data:
++                type: object
++                additionalProperties: true
++              stringData:
++                type: string
++              binaryData:
++                type: string
++              mode:
++                type: number
++      baseUrl:
++        type: string
++        description: |
++          This is the equivalent of c.JupyterHub.base_url, but it is also needed
++          by the Helm chart in general. So, instead of setting
++          c.JupyterHub.base_url, use this configuration.
++      command:
++        type: array
++        description: |
++          A list of strings to be used to replace the JupyterHub image's
++          `ENTRYPOINT` entry. Note that in k8s lingo, the Dockerfile's
++          `ENTRYPOINT` is called `command`. The list of strings will be expanded
++          with Helm's template function `tpl` which can render Helm template
++          logic inside curly braces (`{{... }}`).
++
++          This could be useful to wrap the invocation of JupyterHub itself in
++          some custom way.
++
++          For more details, see the [Kubernetes
++          documentation](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/).
++      args:
++        type: array
++        description: |
++          A list of strings to be used to replace the JupyterHub image's `CMD`
++          entry as well as the Helm chart's default way to start JupyterHub.
++          Note that in k8s lingo, the Dockerfile's `CMD` is called `args`. The
++          list of strings will be expanded with Helm's template function `tpl`
++          which can render Helm template logic inside curly braces (`{{... }}`).
++
++          ```{warning}
++          By replacing the entire configuration file, which is mounted to
++          `/usr/local/etc/jupyterhub/jupyterhub_config.py` by the Helm chart,
++          instead of appending to it with `hub.extraConfig`, you expose your
++          deployment to issues stemming from getting out of sync with the Helm
++          chart's config file.
++
++          These kinds of issues will be significantly harder to debug and
++          diagnose, and can therefore cause a lot of time expenditure for both
++          the community maintaining the Helm chart as well as yourself, even if
++          this wasn't the reason for the issue.
++
++          Due to this, we ask that you do your _absolute best_ to avoid
++          replacing the default provided `jupyterhub_config.py` file. It is
++          usually possible to avoid it. For example, if your goal is to have a
++          dedicated .py file for more extensive additions that you can syntax
++          highlight and such, and you feel limited by passing code in
++          `hub.extraConfig` which is part of a YAML file, you can use [this
++          trick](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues/1580#issuecomment-707776237)
++          instead.
++          ```
++
++          ```yaml
++          hub:
++            args:
++              - "jupyterhub"
++              - "--config"
++              - "/usr/local/etc/jupyterhub/jupyterhub_config.py"
++              - "--debug"
++              - "--upgrade-db"
++          ```
++
++          For more details, see the [Kubernetes
++          documentation](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/).
++      cookieSecret:
++        type: [string, "null"]
++        description: |
++          ```{note}
++          As of version 1.0.0 this will automatically be generated and there is
++          no need to set it manually.
++
++          If you wish to reset a generated key, you can use `kubectl edit` on
++          the k8s Secret typically named `hub` and remove the
++          `hub.config.JupyterHub.cookie_secret` entry in the k8s Secret, then
++          perform a new `helm upgrade`.
++          ```
++
++          A 32-byte cryptographically secure randomly generated string used to sign values of
++          secure cookies set by the hub. If unset, jupyterhub will generate one on startup and
++          save it in the file `jupyterhub_cookie_secret` in the `/srv/jupyterhub` directory of
++          the hub container. A value set here will make JupyterHub overwrite any previous file.
++
++          You do not need to set this at all if you are using the default configuration for
++          storing databases - sqlite on a persistent volume (with `hub.db.type` set to the
++          default `sqlite-pvc`). If you are using an external database, then you must set this
++          value explicitly - or your users will keep getting logged out each time the hub pod
++          restarts.
++
++          Changing this value will cause all user logins to be invalidated. If this secret
++          leaks, *immediately* change it to something else, or user data can be compromised.
++
++          ```sh
++          # to generate a value, run
++          openssl rand -hex 32
++          ```
++      image: &image-spec
++        type: object
++        additionalProperties: false
++        required: [name, tag]
++        description: |
++          Set custom image name, tag, pullPolicy, or pullSecrets for the pod.
++        properties:
++          name:
++            type: string
++            description: |
++              The name of the image, without the tag.
++
++              ```
++              # example name
++              gcr.io/my-project/my-image
++              ```
++          tag:
++            type: string
++            description: |
++              The tag of the image to pull. This is the value following `:` in
++              complete image specifications.
++
++         ```
++         # example tags
++         v1.11.1
++         zhy270a
++         ```
++     pullPolicy:
++       enum: [null, "", IfNotPresent, Always, Never]
++       description: |
++         Configures the Pod's `spec.imagePullPolicy`.
++
++         See the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)
++         for more info.
++     pullSecrets:
++       type: array
++       description: |
++         A list of references to existing Kubernetes Secrets with credentials to
++         pull the image.
++
++         This Pod's final `imagePullSecrets` k8s specification will be a
++         combination of:
++
++         1. This list of k8s Secrets, specific for this pod.
++         2. The list of k8s Secrets, for use by all pods in the Helm chart,
++            declared in this Helm chart's configuration called
++            `imagePullSecrets`.
++         3. A k8s Secret, for use by all pods in the Helm chart, conditionally
++            created from image registry credentials provided under
++            `imagePullSecret` if `imagePullSecret.create` is set to true.
++
++         ```yaml
++         # example - k8s native syntax
++         pullSecrets:
++           - name: my-k8s-secret-with-image-registry-credentials
++
++         # example - simplified syntax
++         pullSecrets:
++           - my-k8s-secret-with-image-registry-credentials
++         ```
++ networkPolicy: &networkPolicy-spec
++   type: object
++   additionalProperties: false
++   description: |
++     This configuration regards the creation and configuration of a k8s
++     _NetworkPolicy resource_.
++   properties:
++     enabled:
++       type: boolean
++       description: |
++         Toggle the creation of the NetworkPolicy resource targeting this pod,
++         and by doing so, restricting its communication to only what is
++         explicitly allowed in the NetworkPolicy.
++     ingress:
++       type: array
++       description: |
++         Additional ingress rules to add besides those that are required for
++         core functionality.
++     egress:
++       type: array
++       description: |
++         Additional egress rules to add besides those that are required for core
++         functionality and those added via
++         [`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules).
++
++         ```{versionchanged} 2.0.0
++         The default value changed from providing one very permissive rule
++         allowing all egress to providing no rule. The permissive rule is still
++         provided via
++         [`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules) set to
++         true though.
++         ```
++
++         As an example, below is a configuration that disables the more broadly
++         permissive `.privateIPs` egress allow rule for the hub pod, and instead
++         provides tightly scoped permissions to access a specific k8s local
++         service as identified by pod labels.
++
++         ```yaml
++         hub:
++           networkPolicy:
++             egressAllowRules:
++               privateIPs: false
++             egress:
++               - to:
++                   - podSelector:
++                       matchLabels:
++                         app: my-k8s-local-service
++                 ports:
++                   - protocol: TCP
++                     port: 5978
++         ```
++     egressAllowRules:
++       type: object
++       additionalProperties: false
++       description: |
++         This is a set of predefined rules that when enabled will be added to
++         the NetworkPolicy list of egress rules.
++
++         The resulting egress rules will be a composition of:
++         - rules specific for the respective pod(s) function within the Helm
++           chart
++         - rules based on enabled `egressAllowRules` flags
++         - rules explicitly specified by the user
++
++         ```{note}
++         Each flag under this configuration will not render into a dedicated
++         rule in the NetworkPolicy resource, but will instead combine with the
++         other flags into a reduced set of rules to avoid a performance penalty.
++         ```
++
++         ```{versionadded} 2.0.0
++         ```
++       properties:
++         cloudMetadataServer:
++           type: boolean
++           description: |
++             Defaults to `false` for singleuser servers, but to `true` for all
++             other network policies.
++
++             When enabled this rule allows the respective pod(s) to establish
++             outbound connections to the cloud metadata server.
++
++             Note that the `nonPrivateIPs` rule allows all non-private IP ranges
++             but makes an exception for the cloud metadata server, leaving this
++             as the definitive configuration to allow access to the cloud
++             metadata server.
++
++             ```{versionchanged} 3.0.0
++             This configuration is not allowed to be configured true at the
++             same time as
++             [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)
++             to avoid an ambiguous configuration.
++             ```
++         dnsPortsCloudMetadataServer:
++           type: boolean
++           description: |
++             Defaults to `true` for all network policies.
++
++             When enabled this rule allows the respective pod(s) to establish
++             outbound connections to the cloud metadata server via port 53.
++
++             Relying on this rule for the singleuser config should go hand in
++             hand with disabling
++             [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)
++             to avoid an ambiguous configuration.
++
++             Known situations when this rule can be relevant:
++
++             - In GKE clusters with Cloud DNS that is reached at the cloud
++               metadata server's non-private IP.
++
++             ```{note}
++             This chart doesn't know how to identify the DNS server that pods
++             will rely on due to variations between how k8s clusters have been
++             set up. Due to that, multiple rules are enabled by default to
++             ensure DNS connectivity.
++             ```
++
++             ```{versionadded} 3.0.0
++             ```
++         dnsPortsKubeSystemNamespace:
++           type: boolean
++           description: |
++             Defaults to `true` for all network policies.
++
++             When enabled this rule allows the respective pod(s) to establish
++             outbound connections to pods in the kube-system namespace via port
++             53.
++
++             Known situations when this rule can be relevant:
++
++             - GKE, EKS, AKS, and other clusters relying directly on `kube-dns`
++               or `coredns` pods in the `kube-system` namespace.
++
++             ```{note}
++             This chart doesn't know how to identify the DNS server that pods
++             will rely on due to variations between how k8s clusters have been
++             set up. Due to that, multiple rules are enabled by default to
++             ensure DNS connectivity.
++             ```
++
++             ```{versionadded} 3.0.0
++             ```
++         dnsPortsPrivateIPs:
++           type: boolean
++           description: |
++             Defaults to `true` for all network policies.
++
++             When enabled this rule allows the respective pod(s) to establish
++             outbound connections to private IPs via port 53.
++
++             Known situations when this rule can be relevant:
++
++             - GKE clusters relying on a DNS server indirectly via a node-local
++               DNS cache at an unknown private IP.
++
++             ```{note}
++             This chart doesn't know how to identify the DNS server that pods
++             will rely on due to variations between how k8s clusters have been
++             set up. Due to that, multiple rules are enabled by default to
++             ensure DNS connectivity.
++             ```
++
++             ```{warning}
++             This rule is not expected to work in clusters relying on Cilium to
++             enforce the NetworkPolicy rules (includes GKE clusters with
++             Dataplane v2), this is due to a [known
++             limitation](https://github.com/cilium/cilium/issues/9209).
++             ```
++         nonPrivateIPs:
++           type: boolean
++           description: |
++             Defaults to `true` for all network policies.
++
++             When enabled this rule allows the respective pod(s) to establish
++             outbound connections to the non-private IP ranges with the
++             exception of the cloud metadata server. This means respective
++             pod(s) can establish connections to the internet but not (say) an
++             unsecured prometheus server running in the same cluster.
++         privateIPs:
++           type: boolean
++           description: |
++             Defaults to `false` for singleuser servers, but to `true` for all
++             other network policies.
++
++             Private IPs refer to the IP ranges `10.0.0.0/8`, `172.16.0.0/12`,
++             `192.168.0.0/16`.
++
++             When enabled this rule allows the respective pod(s) to establish
++             outbound connections to the internal k8s cluster. This means the
++             respective pod(s) can reach (say) an unsecured prometheus server
++             running in the same cluster.
++
++             Since not all workloads in the k8s cluster may have NetworkPolicies
++             setup to restrict their incoming connections, having this set to
++             false can be a good defense against malicious intent from someone
++             in control of software in these pods.
++
++             If possible, try to avoid setting this to true as it gives broad
++             permissions that could be specified more directly via the
++             [`.egress`](schema_singleuser.networkPolicy.egress) configuration.
++
++             ```{warning}
++             This rule is not expected to work in clusters relying on Cilium to
++             enforce the NetworkPolicy rules (includes GKE clusters with
++             Dataplane v2), this is due to a [known
++             limitation](https://github.com/cilium/cilium/issues/9209).
++             ```
++     interNamespaceAccessLabels:
++       enum: [accept, ignore]
++       description: |
++         This configuration option determines if both namespaces and pods in
++         other namespaces that have specific access labels should be accepted to
++         allow ingress (set to `accept`), or if the labels are to be ignored
++         when applied outside the local namespace (set to `ignore`).
++
++         The available access labels for respective NetworkPolicy resources are:
++
++         - `hub.jupyter.org/network-access-hub: "true"` (hub)
++         - `hub.jupyter.org/network-access-proxy-http: "true"` (proxy.chp, proxy.traefik)
++         - `hub.jupyter.org/network-access-proxy-api: "true"` (proxy.chp)
++         - `hub.jupyter.org/network-access-singleuser: "true"` (singleuser)
++     allowedIngressPorts:
++       type: array
++       description: |
++         A rule to allow ingress on these ports will be added no matter what the
++         origin of the request is. The default setting for `proxy.chp` and
++         `proxy.traefik`'s networkPolicy configuration is `[http, https]`, while
++         it is `[]` for other networkPolicies.
++
++         Note that these port names or numbers target a Pod's port name or
++         number, not a k8s Service's port name or number.
++ db:
++   type: object
++   additionalProperties: false
++   properties:
++     type:
++       enum: [sqlite-pvc, sqlite-memory, mysql, postgres, other]
++       description: |
++         Type of database backend to use for the hub database.
++
++         The Hub requires a persistent database to function, and this lets you
++         specify where it should be stored.
++
++         The various options are:
++
++         1. **sqlite-pvc**
++
++            Use an `sqlite` database kept on a persistent volume attached to the
++            hub.
++
++            By default, this disk is created by the cloud provider using
++            *dynamic provisioning* configured by a [storage
++            class](https://kubernetes.io/docs/concepts/storage/storage-classes/).
++            You can customize how this disk is created / attached by setting
++            various properties under `hub.db.pvc`.
++
++            This is the default setting, and should work well for most cloud
++            provider deployments.
++
++         2. **sqlite-memory**
++
++            Use an in-memory `sqlite` database. This should only be used for
++            testing, since the database is erased whenever the hub pod restarts
++            - causing the hub to lose all memory of users who had logged in
++            before.
++
++            When using this for testing, make sure you delete all other objects
++            that the hub has created (such as user pods, user PVCs, etc) every
++            time the hub restarts. Otherwise you might run into errors about
++            duplicate resources.
++
++         3. **mysql**
++
++            Use an externally hosted mysql database.
++
++            You have to specify an sqlalchemy connection string for the mysql
++            database you want to connect to in `hub.db.url` if using this
++            option.
++
++            The general format of the connection string is:
++            ```
++            mysql+pymysql://<db-username>:<db-password>@<db-hostname>:<db-port>/<db-name>
++            ```
++
++            The user specified in the connection string must have the rights to
++            create tables in the database specified.
++
++         4. **postgres**
++
++            Use an externally hosted postgres database.
++
++            You have to specify an sqlalchemy connection string for the
++            postgres database you want to connect to in `hub.db.url` if using
++            this option.
++
++            The general format of the connection string is:
++            ```
++            postgresql+psycopg2://<db-username>:<db-password>@<db-hostname>:<db-port>/<db-name>
++            ```
++
++            The user specified in the connection string must have the rights to
++            create tables in the database specified.
++
++         5. **other**
++
++            Use an externally hosted database of some kind other than mysql or
++            postgres.
++
++            When using _other_, the database password must be passed as part of
++            [hub.db.url](schema_hub.db.url) as
++            [hub.db.password](schema_hub.db.password) will be ignored.
++     pvc:
++       type: object
++       additionalProperties: false
++       required: [storage]
++       description: |
++         Customize the Persistent Volume Claim used when `hub.db.type` is
++         `sqlite-pvc`.
++       properties:
++         annotations:
++           type: object
++           additionalProperties: false
++           patternProperties: &labels-and-annotations-patternProperties
++             ".*":
++               type: string
++           description: |
++             Annotations to apply to the PVC containing the sqlite database.
++
++             See [the Kubernetes
++             documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
++             for more details about annotations.
++         selector:
++           type: object
++           additionalProperties: true
++           description: |
++             Label selectors to set for the PVC containing the sqlite database.
++
++             Useful when you are using a specific PV, and want to bind to that
++             and only that.
++
++             See [the Kubernetes
++             documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
++             for more details about using a label selector for what PV to bind
++             to.
++         storage:
++           type: string
++           description: |
++             Size of disk to request for the database disk.
++         accessModes:
++           type: array
++           items:
++             type: [string, "null"]
++           description: |
++             AccessModes contains the desired access modes the volume should
++             have. See [the k8s
++             documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1)
++             for more information.
++         storageClassName:
++           type: [string, "null"]
++           description: |
++             Name of the StorageClass required by the claim.
++
++             If this is set to a blank string, the claim's storageClassName
++             will be set to a blank string; if it is null, storageClassName
++             will not be set at all.
++         subPath:
++           type: [string, "null"]
++           description: |
++             Path within the volume from which the container's volume should be
++             mounted. Defaults to "" (volume's root).
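++
++             As a usage sketch tying the `pvc` options above together (the disk
++             size and StorageClass name below are hypothetical values):
++
++             ```yaml
++             hub:
++               db:
++                 type: sqlite-pvc
++                 pvc:
++                   storage: 2Gi
++                   storageClassName: standard
++             ```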
++     upgrade:
++       type: [boolean, "null"]
++       description: |
++         Users with external databases need to opt in to upgrades of the
++         JupyterHub specific database schema if needed as part of a JupyterHub
++         version upgrade.
++     url:
++       type: [string, "null"]
++       description: |
++         Connection string when `hub.db.type` is mysql or postgres.
++
++         See documentation for `hub.db.type` for more details on the format of
++         this property.
++     password:
++       type: [string, "null"]
++       description: |
++         Password for the database when `hub.db.type` is mysql or postgres.
++ labels:
++   type: object
++   additionalProperties: false
++   patternProperties: *labels-and-annotations-patternProperties
++   description: |
++     Extra labels to add to the hub pod.
++
++     See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++     to learn more about labels.
++ initContainers:
++   type: array
++   description: |
++     List of initContainers to be run with the hub pod. See [Kubernetes
++     Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
++
++     ```yaml
++     hub:
++       initContainers:
++         - name: init-myservice
++           image: busybox:1.28
++           command: ['sh', '-c', 'command1']
++         - name: init-mydb
++           image: busybox:1.28
++           command: ['sh', '-c', 'command2']
++     ```
++ extraEnv:
++   type: [object, array]
++   additionalProperties: true
++   description: |
++     Extra environment variables that should be set for the hub pod.
++
++     Environment variables are usually used to:
++     - Pass parameters to some custom code in `hub.extraConfig`.
++     - Configure code running in the hub pod, such as an authenticator or
++       spawner.
++
++     String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet, which
++     is a part of Kubernetes.
++
++     ```yaml
++     hub:
++       extraEnv:
++         # basic notation (for literal values only)
++         MY_ENV_VARS_NAME1: "my env var value 1"
++
++         # explicit notation (the "name" field takes precedence)
++         HUB_NAMESPACE:
++           name: HUB_NAMESPACE
++           valueFrom:
++             fieldRef:
++               fieldPath: metadata.namespace
++
++         # implicit notation (the "name" field is implied)
++         PREFIXED_HUB_NAMESPACE:
++           value: "my-prefix-$(HUB_NAMESPACE)"
++         SECRET_VALUE:
++           valueFrom:
++             secretKeyRef:
++               name: my-k8s-secret
++               key: password
++     ```
++
++     For more information, see the [Kubernetes EnvVar
++     specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
++ extraConfig:
++   type: object
++   additionalProperties: true
++   description: |
++     Arbitrary extra python based configuration that should be in
++     `jupyterhub_config.py`.
++
++     This is the *escape hatch* - if you want to configure JupyterHub to do
++     something specific that is not present here as an option, you can write the
++     raw Python to do it here.
++
++     extraConfig is a *dict*, so there can be multiple configuration snippets
++     under different names. The configuration sections are run in alphabetical
++     order based on the keys.
++
++     Non-exhaustive examples of things you can do here:
++     - Subclass authenticator / spawner to do a custom thing
++     - Dynamically launch different images for different sets of users
++     - Inject an auth token from GitHub authenticator into user pod
++     - Anything else you can think of!
++
++     Since this is usually a multi-line string, you want to format it using
++     YAML's [| operator](https://yaml.org/spec/1.2.2/#23-scalars).
++
++     For example:
++
++     ```yaml
++     hub:
++       extraConfig:
++         myConfig.py: |
++           c.JupyterHub.something = 'something'
++           c.Spawner.something_else = 'something else'
++     ```
++
++     ```{note}
++     No code validation is performed until JupyterHub loads it! If you make a
++     typo here, it will probably manifest itself as the hub pod failing to start
++     up and instead entering an `Error` state or the subsequent
++     `CrashLoopBackoff` state.
++
++     To make use of your own linters and other tooling, it would be useful to
++     not embed Python code inside a YAML file. To do that, consider using
++     [`hub.extraFiles`](schema_hub.extraFiles) and mounting a file to
++     `/usr/local/etc/jupyterhub/jupyterhub_config.d` in order to load your extra
++     configuration logic.
++     ```
++
++ fsGid:
++   type: [integer, "null"]
++   minimum: 0
++   # This schema entry is needed to help us print a more helpful error
++   # message in NOTES.txt if hub.fsGid is set.
++   #
++   description: |
++     ```{note}
++     Removed in version 2.0.0. Use
++     [`hub.podSecurityContext`](schema_hub.podSecurityContext) and specify
++     `fsGroup` instead.
++     ```
++ service:
++   type: object
++   additionalProperties: false
++   description: |
++     Object to configure the Kubernetes Service that JupyterHub will be exposed
++     on.
++   properties:
++     type:
++       enum: [ClusterIP, NodePort, LoadBalancer, ExternalName]
++       description: |
++         The Kubernetes ServiceType to be used.
++
++         The default type is `ClusterIP`. See the [Kubernetes
++         docs](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)
++         to learn more about service types.
++     ports:
++       type: object
++       additionalProperties: false
++       description: |
++         Object to configure the ports the hub service will be deployed on.
++       properties:
++         nodePort:
++           type: [integer, "null"]
++           minimum: 0
++           description: |
++             The nodePort to deploy the hub service on.
++     annotations:
++       type: object
++       additionalProperties: false
++       patternProperties: *labels-and-annotations-patternProperties
++       description: |
++         Kubernetes annotations to apply to the hub service.
++     extraPorts:
++       type: array
++       description: |
++         Extra ports to add to the Hub Service object besides `hub` / `8081`.
++         This should be an array that includes `name`, `port`, and `targetPort`.
++         See [Multi-port
++         Services](https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services)
++         for more details.
++     loadBalancerIP:
++       type: [string, "null"]
++       description: |
++         A public IP address the hub Kubernetes service should be exposed on.
++         Exposing the hub directly is not recommended. Instead route traffic
++         through the proxy-public service towards the hub.
++
++ pdb: &pdb-spec
++   type: object
++   additionalProperties: false
++   description: |
++     Configure a PodDisruptionBudget for this Deployment.
++
++     These are disabled by default for our deployments that don't support being
++     run in parallel with multiple replicas. Only the user-scheduler currently
++     supports being run in parallel with multiple replicas. If they are enabled
++     for a Deployment with only one replica, they will block `kubectl drain` of
++     a node for example.
++
++     Note that if you aim to block scaling down a node with the
++     hub/proxy/autohttps pod that would cause disruptions of the deployment,
++     then you should instead annotate the pods of the Deployment [as described
++     here](https://github.com/kubernetes/autoscaler/blob/HEAD/cluster-autoscaler/FAQ.md#what-types-of-pods-can-prevent-ca-from-removing-a-node).
++ ++ "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" ++ ++ See [the Kubernetes ++ documentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) ++ for more details about disruptions. ++ properties: ++ enabled: ++ type: boolean ++ description: | ++ Decides if a PodDisruptionBudget is created targeting the ++ Deployment's pods. ++ maxUnavailable: ++ type: [integer, "null"] ++ description: | ++ The maximum number of pods that can be unavailable during ++ voluntary disruptions. ++ minAvailable: ++ type: [integer, "null"] ++ description: | ++ The minimum number of pods required to be available during ++ voluntary disruptions. ++ existingSecret: ++ type: [string, "null"] ++ description: | ++ This option allow you to provide the name of an existing k8s Secret to ++ use alongside of the chart managed k8s Secret. The content of this k8s ++ Secret will be merged with the chart managed k8s Secret, giving ++ priority to the self-managed k8s Secret. ++ ++ ```{warning} ++ 1. The self managed k8s Secret must mirror the structure in the chart ++ managed secret. ++ 2. [`proxy.secretToken`](schema_proxy.secretToken) (aka. ++ `hub.config.ConfigurableHTTPProxy.auth_token`) is only read from ++ the chart managed k8s Secret. ++ ``` ++ nodeSelector: &nodeSelector-spec ++ type: object ++ additionalProperties: true ++ description: | ++ An object with key value pairs representing labels. K8s Nodes are ++ required to have match all these labels for this Pod to scheduled on ++ them. ++ ++ ```yaml ++ disktype: ssd ++ nodetype: awesome ++ ``` ++ ++ See [the Kubernetes ++ documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) ++ for more details. ++ tolerations: &tolerations-spec ++ type: array ++ description: | ++ Tolerations allow a pod to be scheduled on nodes with taints. These ++ tolerations are additional tolerations to the tolerations common to ++ all pods of a their respective kind ++ ([scheduling.corePods.tolerations](schema_scheduling.corePods.tolerations), ++ [scheduling.userPods.tolerations](schema_scheduling.userPods.tolerations)). ++ ++ Pass this field an array of ++ [`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#toleration-v1-core) ++ objects. ++ ++ See the [Kubernetes ++ docs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) ++ for more info. ++ activeServerLimit: ++ type: [integer, "null"] ++ description: &jupyterhub-native-config-description | ++ JupyterHub native configuration, see the [JupyterHub ++ documentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html) ++ for more information. ++ allowNamedServers: ++ type: [boolean, "null"] ++ description: *jupyterhub-native-config-description ++ annotations: ++ type: object ++ additionalProperties: false ++ patternProperties: *labels-and-annotations-patternProperties ++ description: | ++ K8s annotations for the hub pod. 
++ authenticatePrometheus:
++   type: [boolean, "null"]
++   description: *jupyterhub-native-config-description
++ concurrentSpawnLimit:
++   type: [integer, "null"]
++   description: *jupyterhub-native-config-description
++ consecutiveFailureLimit:
++   type: [integer, "null"]
++   description: *jupyterhub-native-config-description
++ podSecurityContext: &podSecurityContext-spec
++   additionalProperties: true
++   description: |
++     A k8s native specification of the pod's security context, see [the
++     documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podsecuritycontext-v1-core)
++     for details.
++ containerSecurityContext: &containerSecurityContext-spec
++   type: object
++   additionalProperties: true
++   description: |
++     A k8s native specification of the container's security context, see [the
++     documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core)
++     for details.
++ deploymentStrategy:
++   type: object
++   additionalProperties: false
++   properties:
++     rollingUpdate:
++       type: [string, "null"]
++     type:
++       type: [string, "null"]
++       description: |
++         JupyterHub does not support running in parallel; due to this, we
++         default to using a deployment strategy of Recreate.
++ extraContainers: &extraContainers-spec
++   type: array
++   description: |
++     Additional containers for the Pod. Use a k8s native syntax.
++ extraVolumeMounts: &extraVolumeMounts-spec
++   type: array
++   description: |
++     Additional volume mounts for the Container. Use a k8s native syntax.
++ extraVolumes: &extraVolumes-spec
++   type: array
++   description: |
++     Additional volumes for the Pod. Use a k8s native syntax.
++ livenessProbe: &probe-spec
++   type: object
++   additionalProperties: true
++   required: [enabled]
++   if:
++     properties:
++       enabled:
++         const: true
++   then:
++     description: |
++       This config option is like the k8s native specification of a container
++       probe, except that it also supports an `enabled` boolean flag.
++
++       See [the k8s
++       documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#probe-v1-core)
++       for more details.
++ readinessProbe: *probe-spec
++ namedServerLimitPerUser:
++   type: [integer, "null"]
++   description: *jupyterhub-native-config-description
++ redirectToServer:
++   type: [boolean, "null"]
++   description: *jupyterhub-native-config-description
++ resources: &resources-spec
++   type: object
++   additionalProperties: true
++   description: |
++     A k8s native specification of resources, see [the
++     documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core).
++ lifecycle: &lifecycle-spec
++   type: object
++   additionalProperties: false
++   description: |
++     A k8s native specification of lifecycle hooks on the container, see [the
++     documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#lifecycle-v1-core).
++   properties:
++     postStart:
++       type: object
++       additionalProperties: true
++     preStop:
++       type: object
++       additionalProperties: true
++ services:
++   type: object
++   additionalProperties: true
++   description: |
++     This is where you register JupyterHub services. For details on how to
++     configure these services in this Helm chart just keep reading, but for
++     details on services themselves instead read [JupyterHub's
++     documentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/service.html).
++
++     ```{note}
++     Only a selection of JupyterHub's configuration options that can be
++     configured for a service are documented below. All configuration set here
++     will be applied even if this Helm chart doesn't recognize it.
++     ```
++
++     JupyterHub's native configuration accepts a list of service objects; this
++     Helm chart only accepts a dictionary where each key represents the name of
++     a service and the value is the actual service object.
++
++     When configuring JupyterHub services via this Helm chart, the `name` field
++     can be omitted as it can be implied by the dictionary key. Further, the
++     `api_token` field can be omitted as it will be automatically generated as
++     of version 1.1.0 of this Helm chart.
++
++     If you have an external service that needs to access the automatically
++     generated api_token for the service, you can access it from the `hub` k8s
++     Secret part of this Helm chart under the key
++     `hub.services.my-service-config-key.apiToken`.
++
++     Here is an example configuration of two services, where the first
++     explicitly sets a name and api_token, while the second omits those and lets
++     the name be implied from the key name and the api_token be automatically
++     generated.
++
++     ```yaml
++     hub:
++       services:
++         my-service-1:
++           admin: true
++           name: my-explicitly-set-service-name
++           api_token: my-explicitly-set-api_token
++
++         # the name of the following service will be my-service-2
++         # the api_token of the following service will be generated
++         my-service-2: {}
++     ```
++
++     If you develop a Helm chart depending on the JupyterHub Helm chart and want
++     to let some Pod's environment variable be populated with the api_token of a
++     service registered like above, then do something along these lines.
++
++     ```yaml
++     # ... container specification of a pod ...
++     env:
++       - name: MY_SERVICE_1_API_TOKEN
++         valueFrom:
++           secretKeyRef:
++             # Don't hardcode the name, use the globally accessible
++             # named templates part of the JupyterHub Helm chart.
++             name: {{ include "jupyterhub.hub.fullname" . }}
++             # Note below the use of the configuration key my-service-1
++             # rather than the explicitly set service name.
++             key: hub.services.my-service-1.apiToken
++     ```
++   properties:
++     name:
++       type: string
++       description: |
++         The name can be implied via the key name under which this service is
++         configured, and can therefore be omitted in this Helm chart's
++         configuration of JupyterHub.
++     admin:
++       type: boolean
++     command:
++       type: [string, array]
++     url:
++       type: string
++     api_token:
++       type: [string, "null"]
++       description: |
++         The api_token will be automatically generated if not explicitly set.
++         It will also be exposed via a k8s Secret that is part of this Helm
++         chart, under a specific key.
++
++         See the documentation under [`hub.services`](schema_hub.services) for
++         details about this.
++     apiToken:
++       type: [string, "null"]
++       description: |
++         An alias for api_token, provided for backward compatibility by the
++         JupyterHub Helm chart, that will be transformed to api_token.
++ loadRoles:
++   type: object
++   additionalProperties: true
++   description: |
++     This is where you should define JupyterHub roles and apply them to
++     JupyterHub users, groups, and services to grant them additional permissions
++     as defined in JupyterHub's RBAC system.
++
++     Complement this documentation with [JupyterHub's
++     documentation](https://jupyterhub.readthedocs.io/en/stable/rbac/roles.html#defining-roles)
++     about `load_roles`.
++
++     Note that while JupyterHub's native configuration `load_roles` accepts a
++     list of role objects, this Helm chart only accepts a dictionary where each
++     key represents the name of a role and the value is the actual role object.
++
++     ```yaml
++     hub:
++       loadRoles:
++         teacher:
++           description: Access to users' information and group membership
++
++           # this role provides permissions to...
++           scopes: [users, groups]
++
++           # this role will be assigned to...
++           users: [erik]
++           services: [grading-service]
++           groups: [teachers]
++     ```
++
++     When configuring JupyterHub roles via this Helm chart, the `name` field can
++     be omitted as it can be implied by the dictionary key.
++ shutdownOnLogout:
++   type: [boolean, "null"]
++   description: *jupyterhub-native-config-description
++ templatePaths:
++   type: array
++   description: *jupyterhub-native-config-description
++ templateVars:
++   type: object
++   additionalProperties: true
++   description: *jupyterhub-native-config-description
++ serviceAccount: &serviceAccount
++   type: object
++   required: [create]
++   additionalProperties: false
++   description: |
++     Configuration for a k8s ServiceAccount dedicated for use by the specific
++     pod which this configuration is nested under.
++   properties:
++     create:
++       type: boolean
++       description: |
++         Whether or not to create the `ServiceAccount` resource.
++     name:
++       type: ["string", "null"]
++       description: |
++         This configuration serves multiple purposes:
++
++         - It will be the `serviceAccountName` referenced by related Pods.
++         - If `create` is set, the created ServiceAccount resource will be
++           named like this.
++         - If [`rbac.create`](schema_rbac.create) is set, the associated
++           (Cluster)RoleBindings will bind to this name.
++
++         If not explicitly provided, a default name will be used.
++     annotations:
++       type: object
++       additionalProperties: false
++       patternProperties: *labels-and-annotations-patternProperties
++       description: |
++         Kubernetes annotations to apply to the k8s ServiceAccount.
++ extraPodSpec: &extraPodSpec-spec
++   type: object
++   additionalProperties: true
++   description: |
++     Arbitrary extra k8s pod specification as a YAML object. The default value
++     of this setting is an empty object, i.e. no extra configuration. The value
++     of this property is merged into the pod specification as-is.
++
++     This is a powerful tool for expert k8s administrators with advanced
++     configuration requirements. This setting should only be used for
++     configuration that cannot be accomplished through the other settings.
++     Misusing this setting can break your deployment and/or compromise your
++     system security.
++
++     This is one of four related settings for inserting arbitrary pod
++     specification:
++
++     1. hub.extraPodSpec
++     2. proxy.chp.extraPodSpec
++     3. proxy.traefik.extraPodSpec
++     4. scheduling.userScheduler.extraPodSpec
++
++     One real-world use of these settings is to enable host networking. For
++     example, to configure host networking for the hub pod, add the following to
++     your helm configuration values:
++
++     ```yaml
++     hub:
++       extraPodSpec:
++         hostNetwork: true
++         dnsPolicy: ClusterFirstWithHostNet
++     ```
++
++     Likewise, to configure host networking for the proxy pod, add the
++     following:
++
++     ```yaml
++     proxy:
++       chp:
++         extraPodSpec:
++           hostNetwork: true
++           dnsPolicy: ClusterFirstWithHostNet
++     ```
++
++     N.B. Host networking has special security implications and can easily
++     break your deployment. This is an example, not an endorsement.
++
++     See [PodSpec](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec)
++     for the latest pod resource specification.
++
++ proxy:
++   type: object
++   additionalProperties: false
++   properties:
++     chp:
++       type: object
++       additionalProperties: false
++       description: |
++         Configure the configurable-http-proxy (chp) pod managed by JupyterHub
++         to route traffic both to itself and to user pods.
++       properties:
++         revisionHistoryLimit: *revisionHistoryLimit
++         networkPolicy: *networkPolicy-spec
++         extraCommandLineFlags:
++           type: array
++           description: |
++             A list of strings to be added as command line options when starting
++             [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy#command-line-options)
++             that will be expanded with Helm's template function `tpl` which can
++             render Helm template logic inside curly braces (`{{ ... }}`).
++
++             ```yaml
++             proxy:
++               chp:
++                 extraCommandLineFlags:
++                   - "--auto-rewrite"
++                   - "--custom-header {{ .Values.myCustomStuff }}"
++             ```
++
++             Note that these will be appended last, and if you provide the same
++             flag twice, the last flag will be used, which means you can
++             override the default flag values as well.
++         extraEnv:
++           type: [object, array]
++           additionalProperties: true
++           description: |
++             Extra environment variables that should be set for the chp pod.
++
++             Environment variables are usually used here to:
++             - override HUB_SERVICE_PORT or HUB_SERVICE_HOST default values
++             - set CONFIGPROXY_SSL_KEY_PASSPHRASE to set the passphrase of SSL
++               keys
++
++             String literals with `$(ENV_VAR_NAME)` will be expanded by
++             Kubelet, which is a part of Kubernetes.
++
++             ```yaml
++             proxy:
++               chp:
++                 extraEnv:
++                   # basic notation (for literal values only)
++                   MY_ENV_VARS_NAME1: "my env var value 1"
++
++                   # explicit notation (the "name" field takes precedence)
++                   CHP_NAMESPACE:
++                     name: CHP_NAMESPACE
++                     valueFrom:
++                       fieldRef:
++                         fieldPath: metadata.namespace
++
++                   # implicit notation (the "name" field is implied)
++                   PREFIXED_CHP_NAMESPACE:
++                     value: "my-prefix-$(CHP_NAMESPACE)"
++                   SECRET_VALUE:
++                     valueFrom:
++                       secretKeyRef:
++                         name: my-k8s-secret
++                         key: password
++             ```
++
++             For more information, see the [Kubernetes EnvVar
++             specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
++         pdb: *pdb-spec
++         nodeSelector: *nodeSelector-spec
++         tolerations: *tolerations-spec
++         containerSecurityContext: *containerSecurityContext-spec
++         image: *image-spec
++         livenessProbe: *probe-spec
++         readinessProbe: *probe-spec
++         resources: *resources-spec
++         defaultTarget:
++           type: [string, "null"]
++           description: |
++             Override the URL for the default routing target for the proxy.
++             Defaults to JupyterHub itself. This will generally only have an
++             effect while JupyterHub is not running, as JupyterHub adds itself
++             as the default target after it starts.
++         errorTarget:
++           type: [string, "null"]
++           description: |
++             Override the URL for the error target for the proxy. Defaults to
++             JupyterHub itself. Useful to reduce load on the Hub or produce
++             more informative error messages than the Hub's default, e.g. in
++             highly customized deployments such as BinderHub. See Configurable
++             HTTP Proxy for details on implementing an error target.
++         extraPodSpec: *extraPodSpec-spec
++     secretToken:
++       type: [string, "null"]
++       description: |
++         ```{note}
++         As of version 1.0.0 this will automatically be generated and there is
++         no need to set it manually.
++
++         If you wish to reset a generated key, you can use `kubectl edit` on
++         the k8s Secret typically named `hub` and remove the
++         `hub.config.ConfigurableHTTPProxy.auth_token` entry in the k8s Secret,
++         then perform a new `helm upgrade`.
++         ```
++
++         A 32-byte cryptographically secure randomly generated string used to
++         secure communications between the hub pod and the proxy pod running a
++         [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy)
++         instance.
++
++         ```sh
++         # to generate a value, run
++         openssl rand -hex 32
++         ```
++
++         Changing this value will cause the proxy and hub pods to restart. It
++         is good security practice to rotate these values over time. If this
++         secret leaks, *immediately* change it to something else, or user data
++         can be compromised.
++     service:
++       type: object
++       additionalProperties: false
++       description: |
++         Configuration of the k8s Service `proxy-public` which will either
++         point to the `autohttps` pod running Traefik for TLS termination, or
++         the `proxy` pod running ConfigurableHTTPProxy. Incoming traffic from
++         users on the internet should always go through this k8s Service.
++
++         When this service targets the `autohttps` pod which then routes to the
++         `proxy` pod, a k8s Service named `proxy-http` will be added targeting
++         the `proxy` pod and only accepting HTTP traffic on port 80.
++       properties:
++         type:
++           enum: [ClusterIP, NodePort, LoadBalancer, ExternalName]
++           description: |
++             Default `LoadBalancer`. See the [Kubernetes
++             docs](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)
++             to learn more about service types.
++         labels:
++           type: object
++           additionalProperties: false
++           patternProperties: *labels-and-annotations-patternProperties
++           description: |
++             Extra labels to add to the proxy service.
++
++             See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++             to learn more about labels.
++         annotations:
++           type: object
++           additionalProperties: false
++           patternProperties: *labels-and-annotations-patternProperties
++           description: |
++             Annotations to apply to the service that is exposing the proxy.
++
++             See [the Kubernetes
++             documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
++             for more details about annotations.
++         nodePorts:
++           type: object
++           additionalProperties: false
++           description: |
++             Object to set NodePorts to expose the service on for http and
++             https.
++
++             See [the Kubernetes
++             documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport)
++             for more details about NodePorts.
++           properties:
++             http:
++               type: [integer, "null"]
++               description: |
++                 The HTTP port the proxy-public service should be exposed on.
++             https:
++               type: [integer, "null"]
++               description: |
++                 The HTTPS port the proxy-public service should be exposed on.
++         disableHttpPort:
++           type: boolean
++           description: |
++             Default `false`.
++
++             If `true`, port 80 for incoming HTTP traffic will no longer be
++             exposed. This should not be used with
++             `proxy.https.type=letsencrypt` or `proxy.https.enabled=false` as
++             it would remove the only exposed port.
++         extraPorts:
++           type: array
++           description: |
++             Extra ports the k8s Service should accept incoming traffic on,
++             which will be redirected to either the `autohttps` pod (traefik)
++             or the `proxy` pod (chp).
++
++             See [the Kubernetes
++             documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#serviceport-v1-core)
++             for the structure of the items in this list.
++         loadBalancerIP:
++           type: [string, "null"]
++           description: |
++             The public IP address the proxy-public Kubernetes service should
++             be exposed on. This entry will end up at the configurable proxy
++             server that JupyterHub manages, which will direct traffic to user
++             pods at the `/user` path and the hub pod at the `/hub` path.
++
++             Set this if you want to use a fixed external IP address instead of
++             a dynamically acquired one. This is relevant if you have a domain
++             name that you want to point to a specific IP and want to ensure it
++             doesn't change.
++         loadBalancerSourceRanges:
++           type: array
++           description: |
++             A list of IP CIDR ranges that are allowed to access the load
++             balancer service. Defaults to allowing everyone to access it.
++     https:
++       type: object
++       additionalProperties: false
++       description: |
++         Object for customizing the settings for HTTPS used by the JupyterHub's
++         proxy. For more information on configuring HTTPS for your JupyterHub,
++         see the [HTTPS section in our security guide](https).
++       properties:
++         enabled:
++           type: [boolean, "null"]
++           description: |
++             Indicator to set whether HTTPS should be enabled or not on the
++             proxy. Defaults to `true` if the https object is provided.
++         type:
++           enum: [null, "", letsencrypt, manual, offload, secret]
++           description: |
++             The type of HTTPS encryption that is used. Decides on which ports
++             and network policies are used for communication via HTTPS. Setting
++             this to `secret` sets the type to manual HTTPS with a secret that
++             has to be provided in the `https.secret` object. Defaults to
++             `letsencrypt`.
++         letsencrypt:
++           type: object
++           additionalProperties: false
++           properties:
++             contactEmail:
++               type: [string, "null"]
++               description: |
++                 The contact email to be used for automatically provisioned
++                 HTTPS certificates by Let's Encrypt. For more information see
++                 [Set up automatic HTTPS](setup-automatic-https). Required for
++                 automatic HTTPS.
++             acmeServer:
++               type: [string, "null"]
++               description: |
++                 Let's Encrypt is one of various ACME servers that can provide
++                 a certificate, and by default their production server is
++                 used.
++
++                 Let's Encrypt staging: https://acme-staging-v02.api.letsencrypt.org/directory
++                 Let's Encrypt production: https://acme-v02.api.letsencrypt.org/directory
++         manual:
++           type: object
++           additionalProperties: false
++           description: |
++             Object for providing your own certificates for manual HTTPS
++             configuration. To be provided when setting `https.type` to
++             `manual`. See [Set up manual HTTPS](setup-manual-https)
++           properties:
++             key:
++               type: [string, "null"]
++               description: |
++                 The RSA private key to be used for HTTPS. To be provided in
++                 the form of
++
++                 ```
++                 key: |
++                   -----BEGIN RSA PRIVATE KEY-----
++                   ...
++                   -----END RSA PRIVATE KEY-----
++                 ```
++             cert:
++               type: [string, "null"]
++               description: |
++                 The certificate to be used for HTTPS. To be provided in the
++                 form of
++
++                 ```
++                 cert: |
++                   -----BEGIN CERTIFICATE-----
++                   ...
++                   -----END CERTIFICATE-----
++                 ```
++         secret:
++           type: object
++           additionalProperties: false
++           description: |
++             Secret to be provided when setting `https.type` to `secret`.
++           properties:
++             name:
++               type: [string, "null"]
++               description: |
++                 Name of the secret.
++             key:
++               type: [string, "null"]
++               description: |
++                 Path to the private key to be used for HTTPS.
++                 Example: `'tls.key'`
++             crt:
++               type: [string, "null"]
++               description: |
++                 Path to the certificate to be used for HTTPS.
++                 Example: `'tls.crt'`
++         hosts:
++           type: array
++           description: |
++             Your domain in list form. Required for automatic HTTPS. See [Set
++             up automatic HTTPS](setup-automatic-https). To be provided like:
++             ```
++             hosts:
++               - <your-domain-name>
++             ```
++     traefik:
++       type: object
++       additionalProperties: false
++       description: |
++         Configure the traefik proxy used to terminate TLS when 'autohttps' is
++         enabled.
++       properties:
++         revisionHistoryLimit: *revisionHistoryLimit
++         labels:
++           type: object
++           additionalProperties: false
++           patternProperties: *labels-and-annotations-patternProperties
++           description: |
++             Extra labels to add to the traefik pod.
++
++             See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++             to learn more about labels.
++         networkPolicy: *networkPolicy-spec
++         extraInitContainers:
++           type: array
++           description: |
++             List of extraInitContainers to be run with the traefik pod, after
++             the containers set in the chart. See [Kubernetes
++             Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
++
++             ```yaml
++             proxy:
++               traefik:
++                 extraInitContainers:
++                   - name: init-myservice
++                     image: busybox:1.28
++                     command: ['sh', '-c', 'command1']
++                   - name: init-mydb
++                     image: busybox:1.28
++                     command: ['sh', '-c', 'command2']
++             ```
++         extraEnv:
++           type: [object, array]
++           additionalProperties: true
++           description: |
++             Extra environment variables that should be set for the traefik
++             pod.
++
++             Environment variables here may be used to configure traefik.
++
++             String literals with `$(ENV_VAR_NAME)` will be expanded by
++             Kubelet, which is a part of Kubernetes.
++
++             ```yaml
++             proxy:
++               traefik:
++                 extraEnv:
++                   # basic notation (for literal values only)
++                   MY_ENV_VARS_NAME1: "my env var value 1"
++
++                   # explicit notation (the "name" field takes precedence)
++                   TRAEFIK_NAMESPACE:
++                     name: TRAEFIK_NAMESPACE
++                     valueFrom:
++                       fieldRef:
++                         fieldPath: metadata.namespace
++
++                   # implicit notation (the "name" field is implied)
++                   PREFIXED_TRAEFIK_NAMESPACE:
++                     value: "my-prefix-$(TRAEFIK_NAMESPACE)"
++                   SECRET_VALUE:
++                     valueFrom:
++                       secretKeyRef:
++                         name: my-k8s-secret
++                         key: password
++             ```
++
++             For more information, see the [Kubernetes EnvVar
++             specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
++         pdb: *pdb-spec
++         nodeSelector: *nodeSelector-spec
++         tolerations: *tolerations-spec
++         containerSecurityContext: *containerSecurityContext-spec
++         extraDynamicConfig:
++           type: object
++           additionalProperties: true
++           description: |
++             This refers to traefik's post-startup configuration.
++
++             This Helm chart already provides such configuration, so this is a
++             place where you can merge in additional configuration. If you are
++             about to use this configuration, you may want to inspect the
++             default configuration declared
++             [here](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/HEAD/jupyterhub/templates/proxy/autohttps/_configmap-dynamic.yaml).
++         extraPorts:
++           type: array
++           description: |
++             Extra ports for the traefik container within the autohttps pod
++             that you would like to expose, formatted in a k8s native way.
++         extraStaticConfig:
++           type: object
++           additionalProperties: true
++           description: |
++             This refers to traefik's startup configuration.
++
++             This Helm chart already provides such configuration, so this is a
++             place where you can merge in additional configuration.
++             If you are about to use this configuration, you may want to
++             inspect the default configuration declared
++             [here](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/HEAD/jupyterhub/templates/proxy/autohttps/_configmap-traefik.yaml).
++         extraVolumes: *extraVolumes-spec
++         extraVolumeMounts: *extraVolumeMounts-spec
++         hsts:
++           type: object
++           additionalProperties: false
++           required: [includeSubdomains, maxAge, preload]
++           description: |
++             This section regards an HTTP Strict-Transport-Security (HSTS)
++             response header. It can act as a request for visiting web browsers
++             to enforce HTTPS on their end for a given time into the future,
++             and optionally also for future requests to subdomains.
++
++             These settings relate to traefik configuration, which we use as a
++             TLS termination proxy.
++
++             See [Mozilla's
++             documentation](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security)
++             for more information.
++           properties:
++             includeSubdomains:
++               type: boolean
++             maxAge:
++               type: integer
++             preload:
++               type: boolean
++         image: *image-spec
++         resources: *resources-spec
++         serviceAccount: *serviceAccount
++         extraPodSpec: *extraPodSpec-spec
++     labels:
++       type: object
++       additionalProperties: false
++       patternProperties: *labels-and-annotations-patternProperties
++       description: |
++         K8s labels for the proxy pod.
++
++         ```{note}
++         For consistency, this should really be located under proxy.chp.labels
++         but isn't for historical reasons.
++         ```
++     annotations:
++       type: object
++       additionalProperties: false
++       patternProperties: *labels-and-annotations-patternProperties
++       description: |
++         K8s annotations for the proxy pod.
++
++         ```{note}
++         For consistency, this should really be located under
++         proxy.chp.annotations but isn't for historical reasons.
++         ```
++     deploymentStrategy:
++       type: object
++       additionalProperties: false
++       properties:
++         rollingUpdate:
++           type: [string, "null"]
++         type:
++           type: [string, "null"]
++           description: |
++             While the proxy pod running
++             [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy)
++             could run in parallel, two instances running in parallel wouldn't
++             both receive updates from JupyterHub regarding how it should route
++             traffic. Due to this we default to using a deployment strategy of
++             Recreate instead of RollingUpdate.
++     secretSync:
++       type: object
++       additionalProperties: false
++       description: |
++         This configuration section refers to configuration of the sidecar
++         container in the autohttps pod running next to its traefik container
++         responsible for TLS termination.
++
++         The purpose of this container is to store away and load TLS
++         certificates from a k8s Secret. The TLS certificates are acquired by
++         the ACME client (LEGO) that is running within the traefik container,
++         where traefik is using them for TLS termination.
++       properties:
++         containerSecurityContext: *containerSecurityContext-spec
++         image: *image-spec
++         resources: *resources-spec
++
++ singleuser:
++   type: object
++   additionalProperties: false
++   description: |
++     Options for customizing the environment that is provided to the users
++     after they log in.
++   properties:
++     networkPolicy: *networkPolicy-spec
++     podNameTemplate:
++       type: [string, "null"]
++       description: |
++         Passthrough configuration for
++         [KubeSpawner.pod_name_template](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.pod_name_template).
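++
++         As a usage sketch (the template value below is a hypothetical example;
++         KubeSpawner expands fields such as `{username}`):
++
++         ```yaml
++         singleuser:
++           podNameTemplate: "jupyter-{username}"
++         ```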
++     cpu:
++       type: object
++       additionalProperties: false
++       description: |
++         Set CPU limits & guarantees that are enforced for each user.
++
++         See the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
++         for more info. A combined example with `memory` is sketched after
++         `extraEnv` below.
++       properties:
++         limit:
++           type: [number, "null"]
++         guarantee:
++           type: [number, "null"]
++     memory:
++       type: object
++       additionalProperties: false
++       description: |
++         Set Memory limits & guarantees that are enforced for each user.
++
++         See the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
++         for more info.
++       properties:
++         limit:
++           type: [number, string, "null"]
++         guarantee:
++           type: [number, string, "null"]
++           description: |
++             Note that this field is referred to as *requests* by the
++             Kubernetes API.
++     image: *image-spec
++     initContainers:
++       type: array
++       description: |
++         List of initContainers to be run with every singleuser pod. See
++         [Kubernetes
++         Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
++
++         ```yaml
++         singleuser:
++           initContainers:
++             - name: init-myservice
++               image: busybox:1.28
++               command: ['sh', '-c', 'command1']
++             - name: init-mydb
++               image: busybox:1.28
++               command: ['sh', '-c', 'command2']
++         ```
++     profileList:
++       type: array
++       description: |
++         For more information about the profile list, see [KubeSpawner's
++         documentation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner)
++         as this is simply a passthrough to that configuration.
++
++         ```{note}
++         The image-pullers are aware of the overrides of images in
++         `singleuser.profileList`, but they won't be if you configure it in
++         JupyterHub's configuration of `c.KubeSpawner.profile_list`.
++         ```
++
++         ```yaml
++         singleuser:
++           profileList:
++             - display_name: "Default: Shared, 8 CPU cores"
++               description: "Your code will run on a shared machine with CPU only."
++               default: True
++             - display_name: "Personal, 4 CPU cores & 26GB RAM, 1 NVIDIA Tesla K80 GPU"
++               description: "Your code will run on a personal machine with a GPU."
++               kubespawner_override:
++                 extra_resource_limits:
++                   nvidia.com/gpu: "1"
++         ```
++     extraFiles: *extraFiles
++     extraEnv:
++       type: [object, array]
++       additionalProperties: true
++       description: |
++         Extra environment variables that should be set for the user pods.
++
++         String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet,
++         which is a part of Kubernetes. Note that the user pods will already
++         have access to a set of environment variables that you can use, like
++         `JUPYTERHUB_USER` and `JUPYTERHUB_HOST`. For more information about
++         these, inspect [this source
++         code](https://github.com/jupyterhub/jupyterhub/blob/cc8e7806530466dce8968567d1bbd2b39a7afa26/jupyterhub/spawner.py#L763).
++
++         ```yaml
++         singleuser:
++           extraEnv:
++             # basic notation (for literal values only)
++             MY_ENV_VARS_NAME1: "my env var value 1"
++
++             # explicit notation (the "name" field takes precedence)
++             USER_NAMESPACE:
++               name: USER_NAMESPACE
++               valueFrom:
++                 fieldRef:
++                   fieldPath: metadata.namespace
++
++             # implicit notation (the "name" field is implied)
++             PREFIXED_USER_NAMESPACE:
++               value: "my-prefix-$(USER_NAMESPACE)"
++             SECRET_VALUE:
++               valueFrom:
++                 secretKeyRef:
++                   name: my-k8s-secret
++                   key: password
++         ```
++
++         For more information, see the [Kubernetes EnvVar
++         specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
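++
++         Tying together the `cpu` and `memory` settings documented above, a
++         minimal sketch (the limits and guarantees below are hypothetical
++         values):
++
++         ```yaml
++         singleuser:
++           cpu:
++             limit: 2
++             guarantee: 0.5
++           memory:
++             limit: 1G
++             guarantee: 512M
++         ```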
++     nodeSelector: *nodeSelector-spec
++     extraTolerations: *tolerations-spec
++     extraNodeAffinity:
++       type: object
++       additionalProperties: false
++       description: |
++         Affinities describe where pods prefer or require to be scheduled. They
++         may prefer or require a node where they are to be scheduled to have a
++         certain label (node affinity). They may also require to be scheduled
++         in proximity or with a lack of proximity to another pod (pod affinity
++         and pod anti-affinity).
++
++         See the [Kubernetes
++         docs](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)
++         for more info.
++       properties:
++         required:
++           type: array
++           description: |
++             Pass this field an array of
++             [`NodeSelectorTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#nodeselectorterm-v1-core)
++             objects.
++         preferred:
++           type: array
++           description: |
++             Pass this field an array of
++             [`PreferredSchedulingTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#preferredschedulingterm-v1-core)
++             objects.
++     extraPodAffinity:
++       type: object
++       additionalProperties: false
++       description: |
++         See the description of `singleuser.extraNodeAffinity`.
++       properties:
++         required:
++           type: array
++           description: |
++             Pass this field an array of
++             [`PodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podaffinityterm-v1-core)
++             objects.
++         preferred:
++           type: array
++           description: |
++             Pass this field an array of
++             [`WeightedPodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#weightedpodaffinityterm-v1-core)
++             objects.
++     extraPodAntiAffinity:
++       type: object
++       additionalProperties: false
++       description: |
++         See the description of `singleuser.extraNodeAffinity`.
++       properties:
++         required:
++           type: array
++           description: |
++             Pass this field an array of
++             [`PodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podaffinityterm-v1-core)
++             objects.
++         preferred:
++           type: array
++           description: |
++             Pass this field an array of
++             [`WeightedPodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#weightedpodaffinityterm-v1-core)
++             objects.
++     cloudMetadata:
++       type: object
++       additionalProperties: false
++       required: [blockWithIptables, ip]
++       description: |
++         Please refer to the dedicated section in [the Helm chart
++         documentation](block-metadata-iptables) for more information about
++         this.
++       properties:
++         blockWithIptables:
++           type: boolean
++         ip:
++           type: string
++
++     cmd:
++       type: [array, string, "null"]
++       description: |
++         Passthrough configuration for
++         [KubeSpawner.cmd](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.cmd).
++         The default is "jupyterhub-singleuser". Use `cmd: null` to launch a
++         custom CMD from the image, which must launch jupyterhub-singleuser or
++         an equivalent process eventually. For example, Jupyter's docker-stacks
++         images do this; a combined sketch follows below.
++     defaultUrl:
++       type: [string, "null"]
++       description: |
++         Passthrough configuration for
++         [KubeSpawner.default_url](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.default_url).
++     # FIXME: name mismatch, named events_enabled in kubespawner
++     events:
++       type: [boolean, "null"]
++       description: |
++         Passthrough configuration for
++         [KubeSpawner.events_enabled](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.events_enabled).
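++
++         Tying together the `cmd` and `defaultUrl` settings above, a minimal
++         sketch (the values below are illustrative, not defaults):
++
++         ```yaml
++         singleuser:
++           cmd: null        # use the image's own CMD, e.g. a docker-stacks image
++           defaultUrl: /lab # open JupyterLab instead of the classic UI
++         ```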
++      extraAnnotations:
++        type: object
++        additionalProperties: false
++        patternProperties: *labels-and-annotations-patternProperties
++        description: |
++          Passthrough configuration for
++          [KubeSpawner.extra_annotations](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_annotations).
++      extraContainers:
++        type: array
++        description: |
++          Passthrough configuration for
++          [KubeSpawner.extra_containers](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_containers).
++      extraLabels:
++        type: object
++        additionalProperties: false
++        patternProperties: *labels-and-annotations-patternProperties
++        description: |
++          Passthrough configuration for
++          [KubeSpawner.extra_labels](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_labels).
++      extraPodConfig:
++        type: object
++        additionalProperties: true
++        description: |
++          Passthrough configuration for
++          [KubeSpawner.extra_pod_config](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_pod_config).
++      extraResource:
++        type: object
++        additionalProperties: false
++        properties:
++          # FIXME: name mismatch, named extra_resource_guarantees in kubespawner
++          guarantees:
++            type: object
++            additionalProperties: true
++            description: |
++              Passthrough configuration for
++              [KubeSpawner.extra_resource_guarantees](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_resource_guarantees).
++          # FIXME: name mismatch, named extra_resource_limits in kubespawner
++          limits:
++            type: object
++            additionalProperties: true
++            description: |
++              Passthrough configuration for
++              [KubeSpawner.extra_resource_limits](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_resource_limits).
++      fsGid:
++        type: [integer, "null"]
++        description: |
++          Passthrough configuration for
++          [KubeSpawner.fs_gid](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.fs_gid).
++      lifecycleHooks:
++        type: object
++        additionalProperties: false
++        description: |
++          Passthrough configuration for
++          [KubeSpawner.lifecycle_hooks](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.lifecycle_hooks).
++        properties:
++          postStart:
++            type: object
++            additionalProperties: true
++          preStop:
++            type: object
++            additionalProperties: true
++      networkTools:
++        type: object
++        additionalProperties: false
++        description: |
++          This configuration section refers to the configuration of a
++          conditionally created initContainer for the user pods, whose purpose
++          is to block a specific IP address.
++
++          This initContainer will be created if
++          [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)
++          is set to true.
++        properties:
++          image: *image-spec
++          resources: *resources-spec
++      # FIXME: name mismatch, named service_account in kubespawner
++      serviceAccountName:
++        type: [string, "null"]
++        description: |
++          Passthrough configuration for
++          [KubeSpawner.service_account](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.service_account).
++      startTimeout:
++        type: [integer, "null"]
++        description: |
++          Passthrough configuration for
++          [KubeSpawner.start_timeout](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.start_timeout).
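A hedged sketch of the `lifecycleHooks` passthrough above, which accepts a standard Kubernetes lifecycle object; the seeded directory and paths are assumptions for illustration only:

```yaml
singleuser:
  lifecycleHooks:
    postStart:
      exec:
        # hypothetical: copy shared examples into the user's home on start
        command: ["sh", "-c", "test -d ~/examples || cp -r /srv/examples ~/examples || true"]
```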
++      storage:
++        type: object
++        additionalProperties: false
++        required: [type, homeMountPath]
++        description: |
++          This section configures KubeSpawner directly to some extent but also
++          indirectly through Helm chart specific configuration options such as
++          [`singleuser.storage.type`](schema_singleuser.storage.type).
++        properties:
++          capacity:
++            type: [string, "null"]
++            description: |
++              Configures `KubeSpawner.storage_capacity`.
++
++              See the [KubeSpawner
++              documentation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html)
++              for more information.
++          dynamic:
++            type: object
++            additionalProperties: false
++            properties:
++              pvcNameTemplate:
++                type: [string, "null"]
++                description: |
++                  Configures `KubeSpawner.pvc_name_template`, which will be the
++                  resource name of the PVC created by KubeSpawner for each user
++                  if needed.
++              storageAccessModes:
++                type: array
++                items:
++                  type: [string, "null"]
++                description: |
++                  Configures `KubeSpawner.storage_access_modes`.
++
++                  See KubeSpawner's documentation and [the k8s
++                  documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes)
++                  for more information.
++              storageClass:
++                type: [string, "null"]
++                description: |
++                  Configures `KubeSpawner.storage_class`, which can be an
++                  explicit StorageClass to dynamically provision storage for the
++                  PVC that KubeSpawner will create.
++
++                  Most k8s clusters have a default StorageClass that will be
++                  used if this is unspecified.
++              volumeNameTemplate:
++                type: [string, "null"]
++                description: |
++                  Configures `KubeSpawner.volume_name_template`, which is the
++                  name to reference from the containers volumeMounts section.
++          extraLabels:
++            type: object
++            additionalProperties: false
++            patternProperties: *labels-and-annotations-patternProperties
++            description: |
++              Configures `KubeSpawner.storage_extra_labels`. Note that these
++              labels are set on the PVC during creation only and won't be
++              updated after creation.
++          extraVolumeMounts: *extraVolumeMounts-spec
++          extraVolumes: *extraVolumes-spec
++          homeMountPath:
++            type: string
++            description: |
++              The location within the container where the home folder storage
++              should be mounted.
++          static:
++            type: object
++            additionalProperties: false
++            properties:
++              pvcName:
++                type: [string, "null"]
++                description: |
++                  Configures `KubeSpawner.pvc_claim_name` to reference
++                  pre-existing storage.
++              subPath:
++                type: [string, "null"]
++                description: |
++                  Configures the `subPath` field of a
++                  `KubeSpawner.volume_mounts` entry added by the Helm chart.
++
++                  Path within the volume from which the container's volume
++                  should be mounted.
++          type:
++            enum: [dynamic, static, none]
++            description: |
++              Decide if you want storage to be provisioned dynamically
++              (dynamic), if you want to attach existing storage (static), or
++              if you don't want any storage to be attached (none).
++      allowPrivilegeEscalation:
++        type: [boolean, "null"]
++        description: |
++          Passthrough configuration for
++          [KubeSpawner.allow_privilege_escalation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.allow_privilege_escalation).
++      uid:
++        type: [integer, "null"]
++        description: |
++          Passthrough configuration for
++          [KubeSpawner.uid](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.uid).
++
++          This dictates what user the main container will start up as.
++
++          As an example of when this is needed, consider if you want to enable
++          sudo rights for some of your users. This can be done by starting up as
++          root, enabling it from the container in a startup script, and then
++          transitioning to the normal user.
++
++          Default is 1000; set to null to use the container's default.
++
++  scheduling:
++    type: object
++    additionalProperties: false
++    description: |
++      Objects for customizing the scheduling of various pods on the nodes and
++      related labels.
++    properties:
++      userScheduler:
++        type: object
++        additionalProperties: false
++        required: [enabled, plugins, pluginConfig, logLevel]
++        description: |
++          The user scheduler makes sure that user pods are scheduled tightly
++          on nodes; this is useful for autoscaling of user node pools.
++        properties:
++          enabled:
++            type: boolean
++            description: |
++              Enables the user scheduler.
++          revisionHistoryLimit: *revisionHistoryLimit
++          replicas:
++            type: integer
++            description: |
++              You can have multiple schedulers to share the workload or improve
++              availability on node failure.
++          image: *image-spec
++          pdb: *pdb-spec
++          nodeSelector: *nodeSelector-spec
++          tolerations: *tolerations-spec
++          labels:
++            type: object
++            additionalProperties: false
++            patternProperties: *labels-and-annotations-patternProperties
++            description: |
++              Extra labels to add to the userScheduler pods.
++
++              See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++              to learn more about labels.
++          annotations:
++            type: object
++            additionalProperties: false
++            patternProperties: *labels-and-annotations-patternProperties
++            description: |
++              Extra annotations to add to the user-scheduler pods.
++          containerSecurityContext: *containerSecurityContext-spec
++          logLevel:
++            type: integer
++            description: |
++              Corresponds to the verbosity level of logging made by the
++              kube-scheduler binary running within the user-scheduler pod.
++          plugins:
++            type: object
++            additionalProperties: true
++            description: |
++              These plugins refer to kube-scheduler plugins as documented
++              [here](https://kubernetes.io/docs/reference/scheduling/config/).
++
++              The user-scheduler is really just a kube-scheduler configured in a
++              way to pack users tightly on nodes using these plugins. See
++              values.yaml for information about the default plugins.
++          pluginConfig:
++            type: array
++            description: |
++              Individually activated plugins can be configured further.
++          resources: *resources-spec
++          serviceAccount: *serviceAccount
++          extraPodSpec: *extraPodSpec-spec
++      podPriority:
++        type: object
++        additionalProperties: false
++        description: |
++          Pod priority is used to allow real users to evict user-placeholder
++          pods, which in turn, by entering a Pending state, can trigger a
++          scale-up by a cluster autoscaler.
++
++          Having this option enabled only makes sense if the following
++          conditions are met:
++
++          1. A cluster autoscaler is installed.
++          2. user-placeholder pods are configured to have a priority equal to
++             or higher than the cluster autoscaler's "priority cutoff" so that
++             the cluster autoscaler scales up a node in advance for a pending
++             user placeholder pod.
++          3. Normal user pods have a higher priority than the user-placeholder
++             pods.
++          4. Image puller pods have a priority between normal user pods and
++             user-placeholder pods.
++
++          Note that if the default priority cutoff is not configured on the
++          cluster autoscaler, it will currently default to 0, and in the
++          future this is meant to be lowered.
++          If your cloud provider is installing the cluster autoscaler for you,
++          they may also configure this specifically.
++
++          Recommended settings for a cluster autoscaler...
++
++          ... with a priority cutoff of -10 (GKE):
++
++          ```yaml
++          podPriority:
++            enabled: true
++            globalDefault: false
++            defaultPriority: 0
++            imagePullerPriority: -5
++            userPlaceholderPriority: -10
++          ```
++
++          ... with a priority cutoff of 0:
++
++          ```yaml
++          podPriority:
++            enabled: true
++            globalDefault: true
++            defaultPriority: 10
++            imagePullerPriority: 5
++            userPlaceholderPriority: 0
++          ```
++        properties:
++          enabled:
++            type: boolean
++          globalDefault:
++            type: boolean
++            description: |
++              Warning! This will influence all pods in the cluster.
++
++              The priority a pod usually gets is 0. But this can be overridden
++              with a PriorityClass resource if it is declared to be the global
++              default. This configuration option allows for the creation of
++              such a global default.
++          defaultPriority:
++            type: integer
++            description: |
++              The actual value for the default pod priority.
++          imagePullerPriority:
++            type: integer
++            description: |
++              The actual value for the [hook|continuous]-image-puller pods' priority.
++          userPlaceholderPriority:
++            type: integer
++            description: |
++              The actual value for the user-placeholder pods' priority.
++      userPlaceholder:
++        type: object
++        additionalProperties: false
++        description: |
++          User placeholders simulate users but, thanks to pod priority, will
++          be evicted by the cluster autoscaler if a real user shows up. In
++          this way placeholders allow you to create headroom for the real
++          users and reduce the risk of a user having to wait for a node to be
++          added. Be sure to use the continuous image puller along with
++          placeholders, so the images are also available when real users
++          arrive.
++
++          To test your setup efficiently, you can adjust the number of user
++          placeholders with the following command:
++          ```sh
++          # Configure to have 3 user placeholders
++          kubectl scale sts/user-placeholder --replicas=3
++          ```
++        properties:
++          enabled:
++            type: boolean
++          image: *image-spec
++          revisionHistoryLimit: *revisionHistoryLimit
++          replicas:
++            type: integer
++            description: |
++              How many placeholder pods would you like to have?
++          labels:
++            type: object
++            additionalProperties: false
++            patternProperties: *labels-and-annotations-patternProperties
++            description: |
++              Extra labels to add to the userPlaceholder pods.
++
++              See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++              to learn more about labels.
++          annotations:
++            type: object
++            additionalProperties: false
++            patternProperties: *labels-and-annotations-patternProperties
++            description: |
++              Extra annotations to add to the placeholder pods.
++          resources:
++            type: object
++            additionalProperties: true
++            description: |
++              Unless specified here, the placeholder pods will request the same
++              resources specified for the real singleuser pods.
++          containerSecurityContext: *containerSecurityContext-spec
++      corePods:
++        type: object
++        additionalProperties: false
++        description: |
++          These settings influence all pods considered core pods, namely:
++
++          - hub
++          - proxy
++          - autohttps
++          - hook-image-awaiter
++          - user-scheduler
++
++          By default, the tolerations are:
++
++          - hub.jupyter.org/dedicated=core:NoSchedule
++          - hub.jupyter.org_dedicated=core:NoSchedule
++
++          Note that tolerations set here are combined with the respective
++          component's dedicated tolerations, and that `_` is available in case
++          `/` isn't allowed in the cloud's tolerations.
++        properties:
++          tolerations: *tolerations-spec
++          nodeAffinity:
++            type: object
++            additionalProperties: false
++            description: |
++              Where should pods be scheduled? Perhaps nodes with a certain
++              label should be preferred or even required?
++            properties:
++              matchNodePurpose:
++                enum: [ignore, prefer, require]
++                description: |
++                  Decide if core pods *ignore*, *prefer* or *require* to
++                  schedule on nodes with this label:
++                  ```
++                  hub.jupyter.org/node-purpose=core
++                  ```
++      userPods:
++        type: object
++        additionalProperties: false
++        description: |
++          These settings influence all pods considered user pods, namely:
++
++          - user-placeholder
++          - hook-image-puller
++          - continuous-image-puller
++          - jupyter-
++
++          By default, the tolerations are:
++
++          - hub.jupyter.org/dedicated=user:NoSchedule
++          - hub.jupyter.org_dedicated=user:NoSchedule
++
++          Note that tolerations set here are combined with the respective
++          component's dedicated tolerations, and that `_` is available in case
++          `/` isn't allowed in the cloud's tolerations.
++        properties:
++          tolerations: *tolerations-spec
++          nodeAffinity:
++            type: object
++            additionalProperties: false
++            description: |
++              Where should pods be scheduled? Perhaps nodes with a certain
++              label should be preferred or even required?
++            properties:
++              matchNodePurpose:
++                enum: [ignore, prefer, require]
++                description: |
++                  Decide if user pods *ignore*, *prefer* or *require* to
++                  schedule on nodes with this label:
++                  ```
++                  hub.jupyter.org/node-purpose=user
++                  ```
++
++  ingress:
++    type: object
++    additionalProperties: false
++    required: [enabled]
++    properties:
++      enabled:
++        type: boolean
++        description: |
++          Enable the creation of a Kubernetes Ingress to the proxy-public
++          service.
++
++          See [Advanced Topics — Zero to JupyterHub with Kubernetes
++          0.7.0 documentation](ingress)
++          for more details.
++      annotations:
++        type: object
++        additionalProperties: false
++        patternProperties: *labels-and-annotations-patternProperties
++        description: |
++          Annotations to apply to the Ingress resource.
++
++          See [the Kubernetes
++          documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
++          for more details about annotations.
++      ingressClassName:
++        type: [string, "null"]
++        description: |
++          Maps directly to the Ingress resource's `spec.ingressClassName`.
++
++          See [the Kubernetes
++          documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class)
++          for more details.
++      hosts:
++        type: array
++        description: |
++          List of hosts to route requests to the proxy.
++      pathSuffix:
++        type: [string, "null"]
++        description: |
++          Suffix added to the Ingress's routing path pattern.
++
++          Specify `*` if your ingress matches paths by glob pattern.
++      pathType:
++        enum: [Prefix, Exact, ImplementationSpecific]
++        description: |
++          The path type to use. The default value is 'Prefix'.
++
++          See [the Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types)
++          for more details about path types.
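Putting the ingress options above together, a minimal configuration could look like the following sketch (the hostname, ingress class, and annotation are assumptions for illustration, not defaults):

```yaml
ingress:
  enabled: true
  ingressClassName: nginx
  hosts:
    - hub.example.com
  annotations:
    # hypothetical cert-manager issuer
    cert-manager.io/cluster-issuer: letsencrypt-prod
  pathType: Prefix
```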
++      tls:
++        type: array
++        description: |
++          TLS configurations for the Ingress.
++
++          See [the Kubernetes
++          documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls)
++          for more details about TLS.
++
++  prePuller:
++    type: object
++    additionalProperties: false
++    required: [hook, continuous]
++    properties:
++      revisionHistoryLimit: *revisionHistoryLimit
++      labels:
++        type: object
++        additionalProperties: false
++        patternProperties: *labels-and-annotations-patternProperties
++        description: |
++          Extra labels to add to the pre-puller job pods.
++
++          See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++          to learn more about labels.
++      annotations:
++        type: object
++        additionalProperties: false
++        patternProperties: *labels-and-annotations-patternProperties
++        description: |
++          Annotations to apply to the hook and continuous image puller pods.
++          One example use case is to disable istio sidecars, which could
++          interfere with the image pulling.
++      resources:
++        type: object
++        additionalProperties: true
++        description: |
++          These are standard Kubernetes resources with requests and limits for
++          cpu and memory. They will be used on the containers in the pods
++          pulling images. These should be set extremely low, as the containers
++          either shut down directly or run a pause container that just idles.
++
++          They were made configurable as usage of ResourceQuota may require
++          containers in the namespace to have explicit resources set.
++      extraTolerations: *tolerations-spec
++      hook:
++        type: object
++        additionalProperties: false
++        required: [enabled]
++        description: |
++          See the [*optimization
++          section*](pulling-images-before-users-arrive)
++          for more details.
++        properties:
++          enabled:
++            type: boolean
++          pullOnlyOnChanges:
++            type: boolean
++            description: |
++              Pull only if changes have been made to the images to pull, or
++              more accurately if the hook-image-puller daemonset has changed
++              in any way.
++          podSchedulingWaitDuration:
++            description: |
++              The `hook-image-awaiter` has a criterion to await all the
++              `hook-image-puller` DaemonSet's pods to both schedule and finish
++              their image pulling. This flag can be used to relax this
++              criterion to instead only await the pods that _have already been
++              scheduled_ to finish image pulling after a certain duration.
++
++              The value of this is that sometimes the newly created
++              `hook-image-puller` pods cannot be scheduled because nodes are
++              full, and then it probably won't make sense to block a `helm
++              upgrade`.
++
++              An infinite duration to wait for pods to schedule can be
++              represented by `-1`. This was the default behavior of version
++              0.9.0 and earlier.
++            type: integer
++          nodeSelector: *nodeSelector-spec
++          tolerations: *tolerations-spec
++          containerSecurityContext: *containerSecurityContext-spec
++          image: *image-spec
++          resources: *resources-spec
++          serviceAccount: *serviceAccount
++      continuous:
++        type: object
++        additionalProperties: false
++        required: [enabled]
++        description: |
++          See the [*optimization
++          section*](pulling-images-before-users-arrive)
++          for more details.
++
++          ```{note}
++          If used with a Cluster Autoscaler (an autoscaling node pool), also add
++          user-placeholders and enable pod priority.
++          ```
++        properties:
++          enabled:
++            type: boolean
++      pullProfileListImages:
++        type: boolean
++        description: |
++          The singleuser.profileList configuration can provide a selection of
++          images.
++          This option determines if all images identified there should be
++          pulled, both by the hook and continuous pullers.
++
++          Images are looked for under `kubespawner_override`, and also
++          `profile_options.choices.kubespawner_override` since version 3.2.0.
++
++          The reason to disable this is that if you have, for example, 10
++          images that start pulling in order from 1 to 10, a user who arrives
++          and wants to start a pod with image number 10 will need to wait for
++          all images to be pulled. In that case it may be preferable to just
++          let the arriving user wait for that single image to be pulled on
++          arrival.
++      extraImages:
++        type: object
++        additionalProperties: false
++        description: |
++          See the [*optimization section*](images-that-will-be-pulled) for more
++          details.
++
++          ```yaml
++          prePuller:
++            extraImages:
++              my-extra-image-i-want-pulled:
++                name: jupyter/all-spark-notebook
++                tag: 2343e33dec46
++          ```
++        patternProperties:
++          ".*":
++            type: object
++            additionalProperties: false
++            required: [name, tag]
++            properties:
++              name:
++                type: string
++              tag:
++                type: string
++      containerSecurityContext: *containerSecurityContext-spec
++      pause:
++        type: object
++        additionalProperties: false
++        description: |
++          The image-puller pods rely on initContainers to pull all images,
++          and once they are done, their actual container just runs a `pause`
++          container. These are settings for that pause container.
++        properties:
++          containerSecurityContext: *containerSecurityContext-spec
++          image: *image-spec
++
++  custom:
++    type: object
++    additionalProperties: true
++    description: |
++      Additional values to pass to the Hub.
++      JupyterHub will not itself look at these,
++      but you can read values in your own custom config via `hub.extraConfig`.
++      For example:
++
++      ```yaml
++      custom:
++        myHost: "https://example.horse"
++      hub:
++        extraConfig:
++          myConfig.py: |
++            c.MyAuthenticator.host = get_config("custom.myHost")
++      ```
++
++  cull:
++    type: object
++    additionalProperties: false
++    required: [enabled]
++    description: |
++      The
++      [jupyterhub-idle-culler](https://github.com/jupyterhub/jupyterhub-idle-culler)
++      can run as a JupyterHub managed service to _cull_ running servers.
++    properties:
++      enabled:
++        type: boolean
++        description: |
++          Enable/disable use of jupyterhub-idle-culler.
++      users:
++        type: [boolean, "null"]
++        description: See the `--cull-users` flag.
++      adminUsers:
++        type: [boolean, "null"]
++        description: See the `--cull-admin-users` flag.
++      removeNamedServers:
++        type: [boolean, "null"]
++        description: See the `--remove-named-servers` flag.
++      timeout:
++        type: [integer, "null"]
++        description: See the `--timeout` flag.
++      every:
++        type: [integer, "null"]
++        description: See the `--cull-every` flag.
++      concurrency:
++        type: [integer, "null"]
++        description: See the `--concurrency` flag.
++      maxAge:
++        type: [integer, "null"]
++        description: See the `--max-age` flag.
++
++  debug:
++    type: object
++    additionalProperties: false
++    required: [enabled]
++    properties:
++      enabled:
++        type: boolean
++        description: |
++          Increases the loglevel throughout the resources in the Helm chart.
++
++  rbac:
++    type: object
++    additionalProperties: false
++    required: [create]
++    properties:
++      enabled:
++        type: boolean
++        # This schema entry is needed to help us print a more helpful error
++        # message in NOTES.txt if hub.fsGid is set.
++        #
++        description: |
++          ````{note}
++          Removed in version 2.0.0.
++          If you have been using `rbac.enabled=false`
++          (strongly discouraged), then the equivalent configuration would be:
++
++          ```yaml
++          rbac:
++            create: false
++          hub:
++            serviceAccount:
++              create: false
++          proxy:
++            traefik:
++              serviceAccount:
++                create: false
++          scheduling:
++            userScheduler:
++              serviceAccount:
++                create: false
++          prePuller:
++            hook:
++              serviceAccount:
++                create: false
++          ```
++          ````
++      create:
++        type: boolean
++        description: |
++          Decides if (Cluster)Role and (Cluster)RoleBinding resources are
++          created and bound to the configured serviceAccounts.
++
++  global:
++    type: object
++    additionalProperties: true
++    properties:
++      safeToShowValues:
++        type: boolean
++        description: |
++          A flag that should only be set to true temporarily when experiencing
++          a deprecation message that contains censored content that you wish
++          to reveal.
+diff --git a/applications/jupyterhub/deploy/values.yaml b/applications/jupyterhub/deploy/values.yaml
+index 2f5cbca..41e108d 100755
+--- a/applications/jupyterhub/deploy/values.yaml
++++ b/applications/jupyterhub/deploy/values.yaml
+@@ -1,4 +1,4 @@
+-harness:
++harness: # EDIT: CLOUDHARNESS
+   subdomain: hub
+   service:
+     auto: false
+@@ -31,6 +31,11 @@ harness:
+ fullnameOverride: ""
+ nameOverride:
+
++# enabled is ignored by the jupyterhub chart itself, but a chart depending on
++# the jupyterhub chart conditionally can make use of this config option as the
++# condition.
++enabled:
++
+ # custom can contain anything you want to pass to the hub pod, as all passed
+ # Helm template values will be made available there.
+ custom: {}
+@@ -54,10 +59,11 @@ imagePullSecrets: []
+ # ConfigurableHTTPProxy speaks with the actual ConfigurableHTTPProxy server in
+ # the proxy pod.
+ hub:
++  revisionHistoryLimit:
+   config:
+     JupyterHub:
+       admin_access: true
+-      authenticator_class: keycloak
++      authenticator_class: keycloak # EDIT: CLOUDHARNESS
+   service:
+     type: ClusterIP
+     annotations: {}
+@@ -68,7 +74,6 @@ hub:
+   baseUrl: /
+   cookieSecret:
+   initContainers: []
+-  fsGid: 1000
+   nodeSelector: {}
+   tolerations: []
+   concurrentSpawnLimit: 64
+@@ -106,37 +111,38 @@ hub:
+   extraVolumes: []
+   extraVolumeMounts: []
+   image:
+-    name: jupyterhub/k8s-hub
+-    tag: "1.1.3"
++    name: quay.io/jupyterhub/k8s-hub
++    tag: "3.2.1"
+     pullPolicy:
+     pullSecrets: []
+   resources: {}
++  podSecurityContext:
++    fsGroup: 1000
+   containerSecurityContext:
+     runAsUser: 1000
+     runAsGroup: 1000
+     allowPrivilegeEscalation: false
+   lifecycle: {}
++  loadRoles: {}
+   services: {}
+   pdb:
+     enabled: false
+     maxUnavailable:
+     minAvailable: 1
+   networkPolicy:
+-    enabled: false
++    enabled: true
+     ingress: []
+-    ## egress for JupyterHub already includes Kubernetes internal DNS and
+-    ## access to the proxy, but can be restricted further, but ensure to allow
+-    ## access to the Kubernetes API server that couldn't be pinned ahead of
+-    ## time.
+- ## +- ## ref: https://stackoverflow.com/a/59016417/2220152 +- egress: +- - to: +- - ipBlock: +- cidr: 0.0.0.0/0 ++ egress: [] ++ egressAllowRules: ++ cloudMetadataServer: true ++ dnsPortsCloudMetadataServer: true ++ dnsPortsKubeSystemNamespace: true ++ dnsPortsPrivateIPs: true ++ nonPrivateIPs: true ++ privateIPs: true + interNamespaceAccessLabels: ignore + allowedIngressPorts: [] +- allowNamedServers: true ++ allowNamedServers: true # EDIT: CLOUDHARNESS + namedServerLimitPerUser: + authenticatePrometheus: + redirectToServer: +@@ -163,11 +169,13 @@ hub: + timeoutSeconds: 1 + existingSecret: + serviceAccount: ++ create: true ++ name: + annotations: {} + extraPodSpec: {} + + rbac: +- enabled: true ++ create: true + + # proxy relates to the proxy pod, the proxy-public service, and the autohttps + # pod and proxy-http service. +@@ -202,7 +210,7 @@ proxy: + rollingUpdate: + # service relates to the proxy-public service + service: +- type: NodePort ++ type: NodePort # EDIT: CLOUDHARNESS + labels: {} + annotations: {} + nodePorts: +@@ -215,13 +223,17 @@ proxy: + # chp relates to the proxy pod, which is responsible for routing traffic based + # on dynamic configuration sent from JupyterHub to CHP's REST API. + chp: ++ revisionHistoryLimit: + containerSecurityContext: + runAsUser: 65534 # nobody user + runAsGroup: 65534 # nobody group + allowPrivilegeEscalation: false + image: +- name: jupyterhub/configurable-http-proxy +- tag: 4.5.0 # https://github.com/jupyterhub/configurable-http-proxy/releases ++ name: quay.io/jupyterhub/configurable-http-proxy ++ # tag is automatically bumped to new patch versions by the ++ # watch-dependencies.yaml workflow. ++ # ++ tag: "4.6.1" # https://github.com/jupyterhub/configurable-http-proxy/tags + pullPolicy: + pullSecrets: [] + extraCommandLineFlags: [] +@@ -229,11 +241,14 @@ proxy: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 10 ++ failureThreshold: 30 ++ timeoutSeconds: 3 + readinessProbe: + enabled: true + initialDelaySeconds: 0 + periodSeconds: 2 + failureThreshold: 1000 ++ timeoutSeconds: 1 + resources: {} + defaultTarget: + errorTarget: +@@ -241,12 +256,16 @@ proxy: + nodeSelector: {} + tolerations: [] + networkPolicy: +- enabled: false ++ enabled: true + ingress: [] +- egress: +- - to: +- - ipBlock: +- cidr: 0.0.0.0/0 ++ egress: [] ++ egressAllowRules: ++ cloudMetadataServer: true ++ dnsPortsCloudMetadataServer: true ++ dnsPortsKubeSystemNamespace: true ++ dnsPortsPrivateIPs: true ++ nonPrivateIPs: true ++ privateIPs: true + interNamespaceAccessLabels: ignore + allowedIngressPorts: [http, https] + pdb: +@@ -257,13 +276,17 @@ proxy: + # traefik relates to the autohttps pod, which is responsible for TLS + # termination when proxy.https.type=letsencrypt. + traefik: ++ revisionHistoryLimit: + containerSecurityContext: + runAsUser: 65534 # nobody user + runAsGroup: 65534 # nobody group + allowPrivilegeEscalation: false + image: + name: traefik +- tag: v2.4.11 # ref: https://hub.docker.com/_/traefik?tab=tags ++ # tag is automatically bumped to new patch versions by the ++ # watch-dependencies.yaml workflow. 
++ # ++ tag: "v2.10.7" # ref: https://hub.docker.com/_/traefik?tab=tags + pullPolicy: + pullSecrets: [] + hsts: +@@ -272,6 +295,7 @@ proxy: + maxAge: 15724800 # About 6 months + resources: {} + labels: {} ++ extraInitContainers: [] + extraEnv: {} + extraVolumes: [] + extraVolumeMounts: [] +@@ -283,10 +307,14 @@ proxy: + networkPolicy: + enabled: true + ingress: [] +- egress: +- - to: +- - ipBlock: +- cidr: 0.0.0.0/0 ++ egress: [] ++ egressAllowRules: ++ cloudMetadataServer: true ++ dnsPortsCloudMetadataServer: true ++ dnsPortsKubeSystemNamespace: true ++ dnsPortsPrivateIPs: true ++ nonPrivateIPs: true ++ privateIPs: true + interNamespaceAccessLabels: ignore + allowedIngressPorts: [http, https] + pdb: +@@ -294,6 +322,8 @@ proxy: + maxUnavailable: + minAvailable: 1 + serviceAccount: ++ create: true ++ name: + annotations: {} + extraPodSpec: {} + secretSync: +@@ -302,8 +332,8 @@ proxy: + runAsGroup: 65534 # nobody group + allowPrivilegeEscalation: false + image: +- name: jupyterhub/k8s-secret-sync +- tag: "1.1.3" ++ name: quay.io/jupyterhub/k8s-secret-sync ++ tag: "3.2.1" + pullPolicy: + pullSecrets: [] + resources: {} +@@ -342,29 +372,27 @@ singleuser: + preferred: [] + networkTools: + image: +- name: jupyterhub/k8s-network-tools +- tag: "1.1.3" ++ name: quay.io/jupyterhub/k8s-network-tools ++ tag: "3.2.1" + pullPolicy: + pullSecrets: [] ++ resources: {} + cloudMetadata: + # block set to true will append a privileged initContainer using the + # iptables to block the sensitive metadata server at the provided ip. +- blockWithIptables: false ++ blockWithIptables: true ++ ip: 169.254.169.254 + networkPolicy: +- enabled: false ++ enabled: true + ingress: [] +- egress: +- # Required egress to communicate with the hub and DNS servers will be +- # augmented to these egress rules. +- # +- # This default rule explicitly allows all outbound traffic from singleuser +- # pods, except to a typical IP used to return metadata that can be used by +- # someone with malicious intent. 
+-    - to:
+-        - ipBlock:
+-            cidr: 0.0.0.0/0
+-            except:
+-              - 169.254.169.254/32
++    egress: []
++    egressAllowRules:
++      cloudMetadataServer: false
++      dnsPortsCloudMetadataServer: true
++      dnsPortsKubeSystemNamespace: true
++      dnsPortsPrivateIPs: true
++      nonPrivateIPs: true
++      privateIPs: false
+     interNamespaceAccessLabels: ignore
+     allowedIngressPorts: []
+   events: true
+@@ -376,6 +404,7 @@ singleuser:
+   lifecycleHooks: {}
+   initContainers: []
+   extraContainers: []
++  allowPrivilegeEscalation: false
+   uid: 1000
+   fsGid: 100
+   serviceAccountName:
+@@ -387,29 +416,29 @@ singleuser:
+     static:
+       pvcName:
+       subPath: "{username}"
+-    capacity: 10Mi
+-    homeMountPath: /home/workspace
++    capacity: 10Mi # EDIT: CLOUDHARNESS
++    homeMountPath: /home/workspace # EDIT: CLOUDHARNESS
+     dynamic:
+       storageClass:
+-      pvcNameTemplate: jupyter-{username}
+-      volumeNameTemplate: jupyter-{username}
++      pvcNameTemplate: jupyter-{username} # EDIT: CLOUDHARNESS
++      volumeNameTemplate: jupyter-{username} # EDIT: CLOUDHARNESS
+       storageAccessModes: [ReadWriteOnce]
+   image:
+-    name: jupyter/base-notebook
+-    tag: "hub-1.4.2"
++    name: quay.io/jupyterhub/k8s-singleuser-sample
++    tag: "3.2.1"
+     pullPolicy:
+     pullSecrets: []
+   startTimeout: 300
+   cpu:
+-    limit: 0.4
+-    guarantee: 0.05
++    limit: 0.4 # EDIT: CLOUDHARNESS
++    guarantee: 0.05 # EDIT: CLOUDHARNESS
+   memory:
+-    limit: 0.5G
+-    guarantee: 0.1G
++    limit: 0.5G # EDIT: CLOUDHARNESS
++    guarantee: 0.1G # EDIT: CLOUDHARNESS
+   extraResource:
+     limits: {}
+     guarantees: {}
+-  cmd: /usr/local/bin/start-singleuser.sh
++  cmd: jupyterhub-singleuser
+   defaultUrl:
+   extraPodConfig: {}
+   profileList: []
+@@ -417,74 +446,146 @@ singleuser:
+ # scheduling relates to the user-scheduler pods and user-placeholder pods.
+ scheduling:
+   userScheduler:
+-    enabled: false
++    enabled: false # EDIT: CLOUDHARNESS
++    revisionHistoryLimit:
+     replicas: 2
+     logLevel: 4
++    # plugins are configured on the user-scheduler to score how user pods are
++    # scheduled, packing them onto the most busy node. By doing this, we help
++    # scale down more effectively. It isn't obvious how to enable/disable
++    # scoring plugins, and configure them, to accomplish this.
++    #
+     # plugins ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins-1
++    # migration ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduler-configuration-migrations
++    #
+     plugins:
+       score:
++        # These scoring plugins are enabled by default according to
++        # https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins
++        # 2022-02-22.
++        #
++        # Enabled with high priority:
++        # - NodeAffinity
++        # - InterPodAffinity
++        # - NodeResourcesFit
++        # - ImageLocality
++        # Remains enabled with low default priority:
++        # - TaintToleration
++        # - PodTopologySpread
++        # - VolumeBinding
++        # Disabled for scoring:
++        # - NodeResourcesBalancedAllocation
++        #
+         disabled:
+-          - name: SelectorSpread
+-          - name: TaintToleration
+-          - name: PodTopologySpread
++          # We disable these plugins (with regard to scoring) to not interfere
++          # with or complicate our use of NodeResourcesFit.
+           - name: NodeResourcesBalancedAllocation
+-          - name: NodeResourcesLeastAllocated
+           # Disable plugins to be allowed to enable them again with a different
+           # weight and avoid an error.
+-          - name: NodePreferAvoidPods
+           - name: NodeAffinity
+           - name: InterPodAffinity
++          - name: NodeResourcesFit
+           - name: ImageLocality
+         enabled:
+-          - name: NodePreferAvoidPods
+-            weight: 161051
+           - name: NodeAffinity
+             weight: 14631
+           - name: InterPodAffinity
+             weight: 1331
+-          - name: NodeResourcesMostAllocated
++          - name: NodeResourcesFit
+             weight: 121
+           - name: ImageLocality
+             weight: 11
++    pluginConfig:
++      # Here we declare that we should optimize pods to fit based on a
++      # MostAllocated strategy instead of the default LeastAllocated.
++      - name: NodeResourcesFit
++        args:
++          scoringStrategy:
++            resources:
++              - name: cpu
++                weight: 1
++              - name: memory
++                weight: 1
++            type: MostAllocated
+     containerSecurityContext:
+       runAsUser: 65534 # nobody user
+       runAsGroup: 65534 # nobody group
+       allowPrivilegeEscalation: false
+     image:
+       # IMPORTANT: Bumping the minor version of this binary should go hand in
+-      # hand with an inspection of the user-scheduelrs RBAC resources
+-      # that we have forked.
+-      name: k8s.gcr.io/kube-scheduler
+-      tag: v1.19.13 # ref: https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md
++      # hand with an inspection of the user-scheduler's RBAC
++      # resources that we have forked in
++      # templates/scheduling/user-scheduler/rbac.yaml.
++      #
++      # Debugging advice:
++      #
++      # - Is the configuration of kube-scheduler broken in
++      #   templates/scheduling/user-scheduler/configmap.yaml?
++      #
++      # - Is the kube-scheduler binary compatible with the k8s api-server it
++      #   works against, i.e. neither too new nor too old?
++      #
++      # - You can update the GitHub workflow that runs tests to
++      #   include "deploy/user-scheduler" in the k8s namespace report
++      #   and reduce the user-scheduler deployment's replicas to 1 in
++      #   dev-config.yaml to get relevant logs from the user-scheduler
++      #   pods. Inspect the "Kubernetes namespace report" action!
++      #
++      # - Typical failures are that kube-scheduler fails to search for
++      #   resources via its "informers", and won't start trying to
++      #   schedule pods before they succeed. This may require additional
++      #   RBAC permissions, or require that the k8s api-server is
++      #   aware of the resources.
++      #
++      # - If "successfully acquired lease" can be seen in the logs, it
++      #   is a good sign kube-scheduler is ready to schedule pods.
++      #
++      name: registry.k8s.io/kube-scheduler
++      # tag is automatically bumped to new patch versions by the
++      # watch-dependencies.yaml workflow. The minor version is pinned in the
++      # workflow, and should be updated there if a minor version bump is done
++      # here. We aim to stay around 1 minor version behind the latest k8s
++      # version.
++      #
++      tag: "v1.28.6" # ref: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
+     pullPolicy:
+     pullSecrets: []
+     nodeSelector: {}
+     tolerations: []
++    labels: {}
++    annotations: {}
+     pdb:
+       enabled: true
+       maxUnavailable: 1
+       minAvailable:
+     resources: {}
+     serviceAccount:
++      create: true
++      name:
+       annotations: {}
+     extraPodSpec: {}
+   podPriority:
+     enabled: false
+     globalDefault: false
+     defaultPriority: 0
++    imagePullerPriority: -5
+     userPlaceholderPriority: -10
+   userPlaceholder:
+     enabled: true
+     image:
+-      name: k8s.gcr.io/pause
+-      # tag's can be updated by inspecting the output of the command:
+-      # gcloud container images list-tags k8s.gcr.io/pause --sort-by=~tags
++      name: registry.k8s.io/pause
++      # tag is automatically bumped to new patch versions by the
++      # watch-dependencies.yaml workflow.
+       #
+       # If you update this, also update prePuller.pause.image.tag
+-      tag: "3.5"
++      #
++      tag: "3.9"
+       pullPolicy:
+       pullSecrets: []
++    revisionHistoryLimit:
+     replicas: 0
++    labels: {}
++    annotations: {}
+     containerSecurityContext:
+       runAsUser: 65534 # nobody user
+       runAsGroup: 65534 # nobody group
+@@ -517,6 +618,8 @@ scheduling:
+
+ # prePuller relates to the hook|continuous-image-puller DaemonsSets
+ prePuller:
++  revisionHistoryLimit:
++  labels: {}
+   annotations: {}
+   resources: {}
+   containerSecurityContext:
+@@ -530,8 +633,8 @@ prePuller:
+     pullOnlyOnChanges: true
+     # image and the configuration below relates to the hook-image-awaiter Job
+     image:
+-      name: jupyterhub/k8s-image-awaiter
+-      tag: "1.1.3"
++      name: quay.io/jupyterhub/k8s-image-awaiter
++      tag: "3.2.1"
+       pullPolicy:
+       pullSecrets: []
+     containerSecurityContext:
+@@ -543,6 +646,8 @@ prePuller:
+     tolerations: []
+     resources: {}
+     serviceAccount:
++      create: true
++      name:
+       annotations: {}
+   continuous:
+     enabled: true
+@@ -554,18 +659,20 @@ prePuller:
+       runAsGroup: 65534 # nobody group
+       allowPrivilegeEscalation: false
+     image:
+-      name: k8s.gcr.io/pause
+-      # tag's can be updated by inspecting the output of the command:
+-      # gcloud container images list-tags k8s.gcr.io/pause --sort-by=~tags
++      name: registry.k8s.io/pause
++      # tag is automatically bumped to new patch versions by the
++      # watch-dependencies.yaml workflow.
+       #
+       # If you update this, also update scheduling.userPlaceholder.image.tag
+-      tag: "3.5"
++      #
++      tag: "3.9"
+       pullPolicy:
+       pullSecrets: []
+
+ ingress:
+   enabled: false
+   annotations: {}
++  ingressClassName:
+   hosts: []
+   pathSuffix:
+   pathType: Prefix
+@@ -581,7 +688,8 @@ ingress:
+ cull:
+   enabled: true
+   users: false # --cull-users
+-  removeNamedServers: true # --remove-named-servers
++  adminUsers: true # --cull-admin-users
++  removeNamedServers: true # EDIT: CLOUDHARNESS
+   timeout: 3600 # --timeout
+   every: 600 # --cull-every
+   concurrency: 10 # --concurrency
+diff --git a/applications/jupyterhub/zero-to-jupyterhub-k8s b/applications/jupyterhub/zero-to-jupyterhub-k8s
+new file mode 160000
+index 0000000..c92c123
+--- /dev/null
++++ b/applications/jupyterhub/zero-to-jupyterhub-k8s
+@@ -0,0 +1 @@
++Subproject commit c92c12374795e84f36f5f16c4e8b8a448ad2f230-dirty
diff --git a/applications/jupyterhub/update.sh b/applications/jupyterhub/update.sh
new file mode 100644
index 00000000..cddf6899
--- /dev/null
+++ b/applications/jupyterhub/update.sh
@@ -0,0 +1,28 @@
+git clone -n git@github.com:jupyterhub/zero-to-jupyterhub-k8s.git
+pip install chartpress
+cd zero-to-jupyterhub-k8s
+git checkout jupyterhub
+git checkout chartpress.yaml
+chartpress -t $1
+cd ..
+cp -R zero-to-jupyterhub-k8s/jupyterhub/templates/* deploy/templates
+cp zero-to-jupyterhub-k8s/jupyterhub/files/hub/* deploy/resources/hub
+cp zero-to-jupyterhub-k8s/jupyterhub/values* deploy
+cd deploy
+
+rm -Rf templates/proxy/autohttps # Proxy is not used as node balancer
+rm templates/ingress.yaml # Default cloudharness ingress is used
+# Command to replace everything like files/hub/ inside deploy/templates with resources/jupyterhub/hub/
+find templates -type f -exec sed -i 's/files\/hub/resources\/jupyterhub\/hub/g' {} \;
+
+# replace .Values. with .Values.apps.jupyterhub.
+find templates -type f -exec sed -i 's/.Values./.Values.apps.jupyterhub./g' {} \;
+
+# replace .Values.apps.jupyterhub.hub.image with .Values.apps.jupyterhub.harness.deployment.image
+find templates -type f -exec sed -i 's/{{ .Values.apps.jupyterhub.hub.image.name }}:{{ .Values.apps.jupyterhub.hub.image.tag }}/{{ .Values.apps.jupyterhub.harness.deployment.image }}/g' {} \;
+
+
+
+find templates -type f -exec sed -i 's$.Template.BasePath "/hub$.Template.BasePath "/jupyterhub/hub$g' {} \;
+find templates -type f -exec sed -i 's$.Template.BasePath "/proxy$.Template.BasePath "/jupyterhub/proxy$g' {} \;
+find templates -type f -exec sed -i 's$.Template.BasePath "/scheduling$.Template.BasePath "/jupyterhub/scheduling$g' {} \;
diff --git a/deployment/codefresh-test-local.yaml b/deployment/codefresh-test-local.yaml
index 612e214b..19a91c83 100644
--- a/deployment/codefresh-test-local.yaml
+++ b/deployment/codefresh-test-local.yaml
@@ -32,9 +32,8 @@ steps:
     working_directory: .
     commands:
     - bash cloud-harness/install.sh
-    - harness-deployment . -n test-${{NAMESPACE_BASENAME}}-${{CF_SHORT_REVISION}}
-      -d ${{CF_SHORT_REVISION}}.${{DOMAIN}} -r ${{REGISTRY}} -rs ${{REGISTRY_SECRET}}
-      -e test-local --write-env -N -i samples
+    - harness-deployment . -n test-${{NAMESPACE_BASENAME}} -d ${{DOMAIN}} -r ${{REGISTRY}}
+      -rs ${{REGISTRY_SECRET}} -e test-local --write-env -N -i jupyterhub
     - cat deployment/.env >> ${{CF_VOLUME_PATH}}/env_vars_to_export
     - cat ${{CF_VOLUME_PATH}}/env_vars_to_export
   prepare_deployment_view:
@@ -72,33 +71,11 @@ steps:
         == true
       forceNoCache: includes('${{CLOUDHARNESS_BASE_TAG_FORCE_BUILD}}', '{{CLOUDHARNESS_BASE_TAG_FORCE_BUILD}}')
         == false
-  cloudharness-frontend-build:
-    type: build
-    stage: build
-    dockerfile: infrastructure/base-images/cloudharness-frontend-build/Dockerfile
-    registry: '${{CODEFRESH_REGISTRY}}'
-    buildkit: true
-    build_arguments:
-    - DOMAIN=${{DOMAIN}}
-    - NOCACHE=${{CF_BUILD_ID}}
-    - REGISTRY=${{REGISTRY}}/cloudharness/
-    image_name: cloudharness/cloudharness-frontend-build
-    title: Cloudharness frontend build
-    working_directory: ./.
- tag: '${{CLOUDHARNESS_FRONTEND_BUILD_TAG}}' - when: - condition: - any: - buildDoesNotExist: includes('${{CLOUDHARNESS_FRONTEND_BUILD_TAG_EXISTS}}', - '{{CLOUDHARNESS_FRONTEND_BUILD_TAG_EXISTS}}') == true - forceNoCache: includes('${{CLOUDHARNESS_FRONTEND_BUILD_TAG_FORCE_BUILD}}', - '{{CLOUDHARNESS_FRONTEND_BUILD_TAG_FORCE_BUILD}}') == false - build_static_images: - title: Build static images + build_application_images: type: parallel stage: build steps: - cloudharness-flask: + accounts: type: build stage: build dockerfile: Dockerfile @@ -108,23 +85,18 @@ steps: - DOMAIN=${{DOMAIN}} - NOCACHE=${{CF_BUILD_ID}} - REGISTRY=${{REGISTRY}}/cloudharness/ - - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}} - image_name: cloudharness/cloudharness-flask - title: Cloudharness flask - working_directory: ./infrastructure/common-images/cloudharness-flask - tag: '${{CLOUDHARNESS_FLASK_TAG}}' + image_name: cloudharness/accounts + title: Accounts + working_directory: ./applications/accounts + tag: '${{ACCOUNTS_TAG}}' when: condition: any: - buildDoesNotExist: includes('${{CLOUDHARNESS_FLASK_TAG_EXISTS}}', '{{CLOUDHARNESS_FLASK_TAG_EXISTS}}') + buildDoesNotExist: includes('${{ACCOUNTS_TAG_EXISTS}}', '{{ACCOUNTS_TAG_EXISTS}}') == true - forceNoCache: includes('${{CLOUDHARNESS_FLASK_TAG_FORCE_BUILD}}', '{{CLOUDHARNESS_FLASK_TAG_FORCE_BUILD}}') + forceNoCache: includes('${{ACCOUNTS_TAG_FORCE_BUILD}}', '{{ACCOUNTS_TAG_FORCE_BUILD}}') == false - build_application_images: - type: parallel - stage: build - steps: - nfsserver: + jupyterhub: type: build stage: build dockerfile: Dockerfile @@ -134,18 +106,19 @@ steps: - DOMAIN=${{DOMAIN}} - NOCACHE=${{CF_BUILD_ID}} - REGISTRY=${{REGISTRY}}/cloudharness/ - image_name: cloudharness/nfsserver - title: Nfsserver - working_directory: ./applications/nfsserver - tag: '${{NFSSERVER_TAG}}' + - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}} + image_name: cloudharness/jupyterhub + title: Jupyterhub + working_directory: ./applications/jupyterhub + tag: '${{JUPYTERHUB_TAG}}' when: condition: any: - buildDoesNotExist: includes('${{NFSSERVER_TAG_EXISTS}}', '{{NFSSERVER_TAG_EXISTS}}') + buildDoesNotExist: includes('${{JUPYTERHUB_TAG_EXISTS}}', '{{JUPYTERHUB_TAG_EXISTS}}') == true - forceNoCache: includes('${{NFSSERVER_TAG_FORCE_BUILD}}', '{{NFSSERVER_TAG_FORCE_BUILD}}') + forceNoCache: includes('${{JUPYTERHUB_TAG_FORCE_BUILD}}', '{{JUPYTERHUB_TAG_FORCE_BUILD}}') == false - accounts: + jupyterhub-zero-to-jupyterhub-k8s-images-secret-sync: type: build stage: build dockerfile: Dockerfile @@ -155,18 +128,20 @@ steps: - DOMAIN=${{DOMAIN}} - NOCACHE=${{CF_BUILD_ID}} - REGISTRY=${{REGISTRY}}/cloudharness/ - image_name: cloudharness/accounts - title: Accounts - working_directory: ./applications/accounts - tag: '${{ACCOUNTS_TAG}}' + image_name: cloudharness/jupyterhub-zero-to-jupyterhub-k8s-images-secret-sync + title: Jupyterhub zero to jupyterhub k8s images secret sync + working_directory: ./applications/jupyterhub/zero-to-jupyterhub-k8s/images/secret-sync + tag: '${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SECRET_SYNC_TAG}}' when: condition: any: - buildDoesNotExist: includes('${{ACCOUNTS_TAG_EXISTS}}', '{{ACCOUNTS_TAG_EXISTS}}') + buildDoesNotExist: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SECRET_SYNC_TAG_EXISTS}}', + '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SECRET_SYNC_TAG_EXISTS}}') == true - forceNoCache: includes('${{ACCOUNTS_TAG_FORCE_BUILD}}', '{{ACCOUNTS_TAG_FORCE_BUILD}}') + 
forceNoCache: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SECRET_SYNC_TAG_FORCE_BUILD}}', + '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SECRET_SYNC_TAG_FORCE_BUILD}}') == false - samples: + jupyterhub-zero-to-jupyterhub-k8s-images-image-awaiter: type: build stage: build dockerfile: Dockerfile @@ -176,20 +151,20 @@ steps: - DOMAIN=${{DOMAIN}} - NOCACHE=${{CF_BUILD_ID}} - REGISTRY=${{REGISTRY}}/cloudharness/ - - CLOUDHARNESS_FRONTEND_BUILD=${{REGISTRY}}/cloudharness/cloudharness-frontend-build:${{CLOUDHARNESS_FRONTEND_BUILD_TAG}} - - CLOUDHARNESS_FLASK=${{REGISTRY}}/cloudharness/cloudharness-flask:${{CLOUDHARNESS_FLASK_TAG}} - image_name: cloudharness/samples - title: Samples - working_directory: ./applications/samples - tag: '${{SAMPLES_TAG}}' + image_name: cloudharness/jupyterhub-zero-to-jupyterhub-k8s-images-image-awaiter + title: Jupyterhub zero to jupyterhub k8s images image awaiter + working_directory: ./applications/jupyterhub/zero-to-jupyterhub-k8s/images/image-awaiter + tag: '${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_IMAGE_AWAITER_TAG}}' when: condition: any: - buildDoesNotExist: includes('${{SAMPLES_TAG_EXISTS}}', '{{SAMPLES_TAG_EXISTS}}') + buildDoesNotExist: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_IMAGE_AWAITER_TAG_EXISTS}}', + '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_IMAGE_AWAITER_TAG_EXISTS}}') == true - forceNoCache: includes('${{SAMPLES_TAG_FORCE_BUILD}}', '{{SAMPLES_TAG_FORCE_BUILD}}') + forceNoCache: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_IMAGE_AWAITER_TAG_FORCE_BUILD}}', + '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_IMAGE_AWAITER_TAG_FORCE_BUILD}}') == false - samples-print-file: + jupyterhub-zero-to-jupyterhub-k8s-images-singleuser-sample: type: build stage: build dockerfile: Dockerfile @@ -199,19 +174,20 @@ steps: - DOMAIN=${{DOMAIN}} - NOCACHE=${{CF_BUILD_ID}} - REGISTRY=${{REGISTRY}}/cloudharness/ - - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}} - image_name: cloudharness/samples-print-file - title: Samples print file - working_directory: ./applications/samples/tasks/print-file - tag: '${{SAMPLES_PRINT_FILE_TAG}}' + image_name: cloudharness/jupyterhub-zero-to-jupyterhub-k8s-images-singleuser-sample + title: Jupyterhub zero to jupyterhub k8s images singleuser sample + working_directory: ./applications/jupyterhub/zero-to-jupyterhub-k8s/images/singleuser-sample + tag: '${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SINGLEUSER_SAMPLE_TAG}}' when: condition: any: - buildDoesNotExist: includes('${{SAMPLES_PRINT_FILE_TAG_EXISTS}}', '{{SAMPLES_PRINT_FILE_TAG_EXISTS}}') + buildDoesNotExist: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SINGLEUSER_SAMPLE_TAG_EXISTS}}', + '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SINGLEUSER_SAMPLE_TAG_EXISTS}}') == true - forceNoCache: includes('${{SAMPLES_PRINT_FILE_TAG_FORCE_BUILD}}', '{{SAMPLES_PRINT_FILE_TAG_FORCE_BUILD}}') + forceNoCache: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SINGLEUSER_SAMPLE_TAG_FORCE_BUILD}}', + '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SINGLEUSER_SAMPLE_TAG_FORCE_BUILD}}') == false - samples-secret: + jupyterhub-zero-to-jupyterhub-k8s-images-network-tools: type: build stage: build dockerfile: Dockerfile @@ -221,19 +197,20 @@ steps: - DOMAIN=${{DOMAIN}} - NOCACHE=${{CF_BUILD_ID}} - REGISTRY=${{REGISTRY}}/cloudharness/ - - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}} - image_name: cloudharness/samples-secret - title: Samples secret - working_directory: 
./applications/samples/tasks/secret - tag: '${{SAMPLES_SECRET_TAG}}' + image_name: cloudharness/jupyterhub-zero-to-jupyterhub-k8s-images-network-tools + title: Jupyterhub zero to jupyterhub k8s images network tools + working_directory: ./applications/jupyterhub/zero-to-jupyterhub-k8s/images/network-tools + tag: '${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_NETWORK_TOOLS_TAG}}' when: condition: any: - buildDoesNotExist: includes('${{SAMPLES_SECRET_TAG_EXISTS}}', '{{SAMPLES_SECRET_TAG_EXISTS}}') + buildDoesNotExist: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_NETWORK_TOOLS_TAG_EXISTS}}', + '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_NETWORK_TOOLS_TAG_EXISTS}}') == true - forceNoCache: includes('${{SAMPLES_SECRET_TAG_FORCE_BUILD}}', '{{SAMPLES_SECRET_TAG_FORCE_BUILD}}') + forceNoCache: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_NETWORK_TOOLS_TAG_FORCE_BUILD}}', + '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_NETWORK_TOOLS_TAG_FORCE_BUILD}}') == false - samples-sum: + jupyterhub-zero-to-jupyterhub-k8s-images-hub: type: build stage: build dockerfile: Dockerfile @@ -243,19 +220,20 @@ steps: - DOMAIN=${{DOMAIN}} - NOCACHE=${{CF_BUILD_ID}} - REGISTRY=${{REGISTRY}}/cloudharness/ - - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}} - image_name: cloudharness/samples-sum - title: Samples sum - working_directory: ./applications/samples/tasks/sum - tag: '${{SAMPLES_SUM_TAG}}' + image_name: cloudharness/jupyterhub-zero-to-jupyterhub-k8s-images-hub + title: Jupyterhub zero to jupyterhub k8s images hub + working_directory: ./applications/jupyterhub/zero-to-jupyterhub-k8s/images/hub + tag: '${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_HUB_TAG}}' when: condition: any: - buildDoesNotExist: includes('${{SAMPLES_SUM_TAG_EXISTS}}', '{{SAMPLES_SUM_TAG_EXISTS}}') - == true - forceNoCache: includes('${{SAMPLES_SUM_TAG_FORCE_BUILD}}', '{{SAMPLES_SUM_TAG_FORCE_BUILD}}') + buildDoesNotExist: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_HUB_TAG_EXISTS}}', + '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_HUB_TAG_EXISTS}}') == + true + forceNoCache: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_HUB_TAG_FORCE_BUILD}}', + '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_HUB_TAG_FORCE_BUILD}}') == false - common: + jupyterhub-jupyterhub: type: build stage: build dockerfile: Dockerfile @@ -265,19 +243,18 @@ steps: - DOMAIN=${{DOMAIN}} - NOCACHE=${{CF_BUILD_ID}} - REGISTRY=${{REGISTRY}}/cloudharness/ - - CLOUDHARNESS_FLASK=${{REGISTRY}}/cloudharness/cloudharness-flask:${{CLOUDHARNESS_FLASK_TAG}} - image_name: cloudharness/common - title: Common - working_directory: ./applications/common/server - tag: '${{COMMON_TAG}}' + image_name: cloudharness/jupyterhub-jupyterhub + title: Jupyterhub jupyterhub + working_directory: ./applications/jupyterhub/src/jupyterhub + tag: '${{JUPYTERHUB_JUPYTERHUB_TAG}}' when: condition: any: - buildDoesNotExist: includes('${{COMMON_TAG_EXISTS}}', '{{COMMON_TAG_EXISTS}}') - == true - forceNoCache: includes('${{COMMON_TAG_FORCE_BUILD}}', '{{COMMON_TAG_FORCE_BUILD}}') - == false - workflows-send-result-event: + buildDoesNotExist: includes('${{JUPYTERHUB_JUPYTERHUB_TAG_EXISTS}}', + '{{JUPYTERHUB_JUPYTERHUB_TAG_EXISTS}}') == true + forceNoCache: includes('${{JUPYTERHUB_JUPYTERHUB_TAG_FORCE_BUILD}}', + '{{JUPYTERHUB_JUPYTERHUB_TAG_FORCE_BUILD}}') == false + jupyterhub-jupyterhub-singleuser: type: build stage: build dockerfile: Dockerfile @@ -287,19 +264,18 @@ steps: - DOMAIN=${{DOMAIN}} - NOCACHE=${{CF_BUILD_ID}} - 
REGISTRY=${{REGISTRY}}/cloudharness/ - - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}} - image_name: cloudharness/workflows-send-result-event - title: Workflows send result event - working_directory: ./applications/workflows/tasks/send-result-event - tag: '${{WORKFLOWS_SEND_RESULT_EVENT_TAG}}' + image_name: cloudharness/jupyterhub-jupyterhub-singleuser + title: Jupyterhub jupyterhub singleuser + working_directory: ./applications/jupyterhub/src/jupyterhub/singleuser + tag: '${{JUPYTERHUB_JUPYTERHUB_SINGLEUSER_TAG}}' when: condition: any: - buildDoesNotExist: includes('${{WORKFLOWS_SEND_RESULT_EVENT_TAG_EXISTS}}', - '{{WORKFLOWS_SEND_RESULT_EVENT_TAG_EXISTS}}') == true - forceNoCache: includes('${{WORKFLOWS_SEND_RESULT_EVENT_TAG_FORCE_BUILD}}', - '{{WORKFLOWS_SEND_RESULT_EVENT_TAG_FORCE_BUILD}}') == false - workflows-extract-download: + buildDoesNotExist: includes('${{JUPYTERHUB_JUPYTERHUB_SINGLEUSER_TAG_EXISTS}}', + '{{JUPYTERHUB_JUPYTERHUB_SINGLEUSER_TAG_EXISTS}}') == true + forceNoCache: includes('${{JUPYTERHUB_JUPYTERHUB_SINGLEUSER_TAG_FORCE_BUILD}}', + '{{JUPYTERHUB_JUPYTERHUB_SINGLEUSER_TAG_FORCE_BUILD}}') == false + jupyterhub-jupyterhub-examples-service-fastapi: type: build stage: build dockerfile: Dockerfile @@ -309,18 +285,20 @@ steps: - DOMAIN=${{DOMAIN}} - NOCACHE=${{CF_BUILD_ID}} - REGISTRY=${{REGISTRY}}/cloudharness/ - image_name: cloudharness/workflows-extract-download - title: Workflows extract download - working_directory: ./applications/workflows/tasks/extract-download - tag: '${{WORKFLOWS_EXTRACT_DOWNLOAD_TAG}}' + image_name: cloudharness/jupyterhub-jupyterhub-examples-service-fastapi + title: Jupyterhub jupyterhub examples service fastapi + working_directory: ./applications/jupyterhub/src/jupyterhub/examples/service-fastapi + tag: '${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_SERVICE_FASTAPI_TAG}}' when: condition: any: - buildDoesNotExist: includes('${{WORKFLOWS_EXTRACT_DOWNLOAD_TAG_EXISTS}}', - '{{WORKFLOWS_EXTRACT_DOWNLOAD_TAG_EXISTS}}') == true - forceNoCache: includes('${{WORKFLOWS_EXTRACT_DOWNLOAD_TAG_FORCE_BUILD}}', - '{{WORKFLOWS_EXTRACT_DOWNLOAD_TAG_FORCE_BUILD}}') == false - workflows-notify-queue: + buildDoesNotExist: includes('${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_SERVICE_FASTAPI_TAG_EXISTS}}', + '{{JUPYTERHUB_JUPYTERHUB_EXAMPLES_SERVICE_FASTAPI_TAG_EXISTS}}') == + true + forceNoCache: includes('${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_SERVICE_FASTAPI_TAG_FORCE_BUILD}}', + '{{JUPYTERHUB_JUPYTERHUB_EXAMPLES_SERVICE_FASTAPI_TAG_FORCE_BUILD}}') + == false + jupyterhub-jupyterhub-examples-postgres-db: type: build stage: build dockerfile: Dockerfile @@ -330,19 +308,19 @@ steps: - DOMAIN=${{DOMAIN}} - NOCACHE=${{CF_BUILD_ID}} - REGISTRY=${{REGISTRY}}/cloudharness/ - - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}} - image_name: cloudharness/workflows-notify-queue - title: Workflows notify queue - working_directory: ./applications/workflows/tasks/notify-queue - tag: '${{WORKFLOWS_NOTIFY_QUEUE_TAG}}' + image_name: cloudharness/jupyterhub-jupyterhub-examples-postgres-db + title: Jupyterhub jupyterhub examples postgres db + working_directory: ./applications/jupyterhub/src/jupyterhub/examples/postgres/db + tag: '${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_DB_TAG}}' when: condition: any: - buildDoesNotExist: includes('${{WORKFLOWS_NOTIFY_QUEUE_TAG_EXISTS}}', - '{{WORKFLOWS_NOTIFY_QUEUE_TAG_EXISTS}}') == true - forceNoCache: includes('${{WORKFLOWS_NOTIFY_QUEUE_TAG_FORCE_BUILD}}', - 
@@ -330,19 +308,19 @@ steps:
     - DOMAIN=${{DOMAIN}}
     - NOCACHE=${{CF_BUILD_ID}}
     - REGISTRY=${{REGISTRY}}/cloudharness/
-    - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}}
-    image_name: cloudharness/workflows-notify-queue
-    title: Workflows notify queue
-    working_directory: ./applications/workflows/tasks/notify-queue
-    tag: '${{WORKFLOWS_NOTIFY_QUEUE_TAG}}'
+    image_name: cloudharness/jupyterhub-jupyterhub-examples-postgres-db
+    title: Jupyterhub jupyterhub examples postgres db
+    working_directory: ./applications/jupyterhub/src/jupyterhub/examples/postgres/db
+    tag: '${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_DB_TAG}}'
     when:
       condition:
         any:
-          buildDoesNotExist: includes('${{WORKFLOWS_NOTIFY_QUEUE_TAG_EXISTS}}',
-            '{{WORKFLOWS_NOTIFY_QUEUE_TAG_EXISTS}}') == true
-          forceNoCache: includes('${{WORKFLOWS_NOTIFY_QUEUE_TAG_FORCE_BUILD}}',
-            '{{WORKFLOWS_NOTIFY_QUEUE_TAG_FORCE_BUILD}}') == false
-  workflows:
+          buildDoesNotExist: includes('${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_DB_TAG_EXISTS}}',
+            '{{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_DB_TAG_EXISTS}}') == true
+          forceNoCache: includes('${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_DB_TAG_FORCE_BUILD}}',
+            '{{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_DB_TAG_FORCE_BUILD}}')
+            == false
+  jupyterhub-jupyterhub-examples-postgres-hub:
     type: build
     stage: build
     dockerfile: Dockerfile
@@ -352,50 +330,19 @@ steps:
     - DOMAIN=${{DOMAIN}}
     - NOCACHE=${{CF_BUILD_ID}}
     - REGISTRY=${{REGISTRY}}/cloudharness/
-    - CLOUDHARNESS_FLASK=${{REGISTRY}}/cloudharness/cloudharness-flask:${{CLOUDHARNESS_FLASK_TAG}}
-    image_name: cloudharness/workflows
-    title: Workflows
-    working_directory: ./applications/workflows/server
-    tag: '${{WORKFLOWS_TAG}}'
+    image_name: cloudharness/jupyterhub-jupyterhub-examples-postgres-hub
+    title: Jupyterhub jupyterhub examples postgres hub
+    working_directory: ./applications/jupyterhub/src/jupyterhub/examples/postgres/hub
+    tag: '${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_HUB_TAG}}'
     when:
       condition:
         any:
-          buildDoesNotExist: includes('${{WORKFLOWS_TAG_EXISTS}}', '{{WORKFLOWS_TAG_EXISTS}}')
-            == true
-          forceNoCache: includes('${{WORKFLOWS_TAG_FORCE_BUILD}}', '{{WORKFLOWS_TAG_FORCE_BUILD}}')
+          buildDoesNotExist: includes('${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_HUB_TAG_EXISTS}}',
+            '{{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_HUB_TAG_EXISTS}}') == true
+          forceNoCache: includes('${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_HUB_TAG_FORCE_BUILD}}',
+            '{{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_HUB_TAG_FORCE_BUILD}}')
             == false
-  tests_unit:
-    stage: unittest
-    type: parallel
-    steps:
-      samples_ut:
-        title: Unit tests for samples
-        commands:
-        - pytest /usr/src/app/samples/test
-        image: '${{REGISTRY}}/cloudharness/samples:${{SAMPLES_TAG}}'
-  deployment:
-    stage: deploy
-    type: helm
-    working_directory: ./${{CF_REPO_NAME}}
-    title: Installing chart
-    arguments:
-      helm_version: 3.6.2
-      chart_name: deployment/helm
-      release_name: test-${{NAMESPACE_BASENAME}}-${{CF_SHORT_REVISION}}
-      kube_context: '${{CLUSTER_NAME}}'
-      namespace: test-${{NAMESPACE_BASENAME}}-${{CF_SHORT_REVISION}}
-      chart_version: '${{CF_BUILD_ID}}'
-      cmd_ps: --timeout 600s --create-namespace
-      custom_value_files:
-      - ./deployment/helm/values.yaml
-      custom_values:
-      - apps_samples_harness_secrets_asecret=${{ASECRET}}
-  build_test_images:
-    title: Build test images
-    type: parallel
-    stage: qa
-    steps:
-      test-e2e:
+  jupyterhub-jupyterhub-demo-image:
     type: build
     stage: build
     dockerfile: Dockerfile
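
Beyond the image renames, the hunk above does double duty: it also deletes the mid-pipeline tests_unit and deployment steps and the build_test_images parallel group. The helm deployment is not gone; a slimmed-down version is re-added near the end of the file (see the later hunks), and the differences are easiest to read side by side. Old arguments, removed here:

      release_name: test-${{NAMESPACE_BASENAME}}-${{CF_SHORT_REVISION}}
      namespace: test-${{NAMESPACE_BASENAME}}-${{CF_SHORT_REVISION}}
      chart_version: '${{CF_BUILD_ID}}'
      custom_values:
      - apps_samples_harness_secrets_asecret=${{ASECRET}}

New arguments, added further down:

      release_name: test-${{NAMESPACE_BASENAME}}
      namespace: test-${{NAMESPACE_BASENAME}}
      chart_version: '${{CF_SHORT_REVISION}}'
      custom_values: []

So the test release no longer embeds the commit revision in its namespace and release name, the chart version tracks the revision instead of the build id, and the sample-specific secret override disappears together with the samples application.
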
@@ -405,128 +352,58 @@ steps:
     - DOMAIN=${{DOMAIN}}
     - NOCACHE=${{CF_BUILD_ID}}
     - REGISTRY=${{REGISTRY}}/cloudharness/
-    image_name: cloudharness/test-e2e
-    title: Test e2e
-    working_directory: ./test/test-e2e
-    tag: '${{TEST_E2E_TAG}}'
+    image_name: cloudharness/jupyterhub-jupyterhub-demo-image
+    title: Jupyterhub jupyterhub demo image
+    working_directory: ./applications/jupyterhub/src/jupyterhub/demo-image
+    tag: '${{JUPYTERHUB_JUPYTERHUB_DEMO_IMAGE_TAG}}'
     when:
       condition:
         any:
-          buildDoesNotExist: includes('${{TEST_E2E_TAG_EXISTS}}', '{{TEST_E2E_TAG_EXISTS}}')
-            == true
-          forceNoCache: includes('${{TEST_E2E_TAG_FORCE_BUILD}}', '{{TEST_E2E_TAG_FORCE_BUILD}}')
-            == false
-  test-api:
+          buildDoesNotExist: includes('${{JUPYTERHUB_JUPYTERHUB_DEMO_IMAGE_TAG_EXISTS}}',
+            '{{JUPYTERHUB_JUPYTERHUB_DEMO_IMAGE_TAG_EXISTS}}') == true
+          forceNoCache: includes('${{JUPYTERHUB_JUPYTERHUB_DEMO_IMAGE_TAG_FORCE_BUILD}}',
+            '{{JUPYTERHUB_JUPYTERHUB_DEMO_IMAGE_TAG_FORCE_BUILD}}') == false
+  jupyterhub-jupyterhub-onbuild:
     type: build
     stage: build
-    dockerfile: test/test-api/Dockerfile
+    dockerfile: Dockerfile
     registry: '${{CODEFRESH_REGISTRY}}'
     buildkit: true
     build_arguments:
     - DOMAIN=${{DOMAIN}}
     - NOCACHE=${{CF_BUILD_ID}}
     - REGISTRY=${{REGISTRY}}/cloudharness/
-    - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}}
-    image_name: cloudharness/test-api
-    title: Test api
-    working_directory: ./.
-    tag: '${{TEST_API_TAG}}'
+    image_name: cloudharness/jupyterhub-jupyterhub-onbuild
+    title: Jupyterhub jupyterhub onbuild
+    working_directory: ./applications/jupyterhub/src/jupyterhub/onbuild
+    tag: '${{JUPYTERHUB_JUPYTERHUB_ONBUILD_TAG}}'
     when:
       condition:
         any:
-          buildDoesNotExist: includes('${{TEST_API_TAG_EXISTS}}', '{{TEST_API_TAG_EXISTS}}')
-            == true
-          forceNoCache: includes('${{TEST_API_TAG_FORCE_BUILD}}', '{{TEST_API_TAG_FORCE_BUILD}}')
-            == false
-  wait_deployment:
-    stage: qa
-    title: Wait deployment to be ready
-    image: codefresh/kubectl
-    commands:
-    - kubectl config use-context ${{CLUSTER_NAME}}
-    - kubectl config set-context --current --namespace=test-${{NAMESPACE_BASENAME}}-${{CF_SHORT_REVISION}}
-    - kubectl rollout status deployment/accounts
-    - kubectl rollout status deployment/samples
-    - kubectl rollout status deployment/common
-    - kubectl rollout status deployment/workflows
-    - sleep 60
-  tests_api:
-    stage: qa
-    title: Api tests
-    working_directory: /home/test
-    image: '${{REGISTRY}}/cloudharness/test-api:${{TEST_API_TAG}}'
-    fail_fast: false
-    commands:
-    - echo $APP_NAME
-    scale:
-      samples_api_test:
-        title: samples api test
-        volumes:
-        - '${{CF_REPO_NAME}}/applications/samples:/home/test'
-        - '${{CF_REPO_NAME}}/deployment/helm/values.yaml:/opt/cloudharness/resources/allvalues.yaml'
-        environment:
-        - APP_URL=https://samples.${{CF_SHORT_REVISION}}.${{DOMAIN}}/api
-        - USERNAME=sample@testuser.com
-        - PASSWORD=test
-        commands:
-        - st --pre-run cloudharness_test.apitest_init run api/openapi.yaml --base-url
-          https://samples.${{CF_SHORT_REVISION}}.${{DOMAIN}}/api -c all --skip-deprecated-operations
-          --hypothesis-suppress-health-check=too_slow --hypothesis-deadline=180000
-          --request-timeout=180000 --hypothesis-max-examples=2 --show-errors-tracebacks
-        - pytest -v test/api
-      common_api_test:
-        title: common api test
-        volumes:
-        - '${{CF_REPO_NAME}}/applications/common:/home/test'
-        - '${{CF_REPO_NAME}}/deployment/helm/values.yaml:/opt/cloudharness/resources/allvalues.yaml'
-        environment:
-        - APP_URL=https://common.${{CF_SHORT_REVISION}}.${{DOMAIN}}/api
-        commands:
-        - st --pre-run cloudharness_test.apitest_init run api/openapi.yaml --base-url
-          https://common.${{CF_SHORT_REVISION}}.${{DOMAIN}}/api -c all
-      workflows_api_test:
-        title: workflows api test
-        volumes:
-        - '${{CF_REPO_NAME}}/applications/workflows:/home/test'
-        - '${{CF_REPO_NAME}}/deployment/helm/values.yaml:/opt/cloudharness/resources/allvalues.yaml'
-        environment:
-        - APP_URL=https://workflows.${{CF_SHORT_REVISION}}.${{DOMAIN}}/api
-        commands:
-        - st --pre-run cloudharness_test.apitest_init run api/openapi.yaml --base-url
-          https://workflows.${{CF_SHORT_REVISION}}.${{DOMAIN}}/api -c all
-    hooks:
-      on_fail:
-        exec:
-          image: alpine
-          commands:
-          - cf_export FAILED=failed
-  tests_e2e:
-    stage: qa
-    title: End to end tests
-    working_directory: /home/test
-    image: '${{REGISTRY}}/cloudharness/test-e2e:${{TEST_E2E_TAG}}'
-    fail_fast: false
-    commands:
-    - yarn test
-    scale:
-      samples_e2e_test:
-        title: samples e2e test
-        volumes:
-        - '${{CF_REPO_NAME}}/applications/samples/test/e2e:/home/test/__tests__/samples'
-        environment:
-        - APP_URL=https://samples.${{CF_SHORT_REVISION}}.${{DOMAIN}}
-        - USERNAME=sample@testuser.com
-        - PASSWORD=test
-    hooks:
-      on_fail:
-        exec:
-          image: alpine
-          commands:
-          - cf_export FAILED=failed
+          buildDoesNotExist: includes('${{JUPYTERHUB_JUPYTERHUB_ONBUILD_TAG_EXISTS}}',
+            '{{JUPYTERHUB_JUPYTERHUB_ONBUILD_TAG_EXISTS}}') == true
+          forceNoCache: includes('${{JUPYTERHUB_JUPYTERHUB_ONBUILD_TAG_FORCE_BUILD}}',
+            '{{JUPYTERHUB_JUPYTERHUB_ONBUILD_TAG_FORCE_BUILD}}') == false
+  deployment:
+    stage: deploy
+    type: helm
+    working_directory: ./${{CF_REPO_NAME}}
+    title: Installing chart
+    arguments:
+      helm_version: 3.6.2
+      chart_name: deployment/helm
+      release_name: test-${{NAMESPACE_BASENAME}}
+      kube_context: '${{CLUSTER_NAME}}'
+      namespace: test-${{NAMESPACE_BASENAME}}
+      chart_version: '${{CF_SHORT_REVISION}}'
+      cmd_ps: --timeout 600s --create-namespace
+      custom_value_files:
+      - ./deployment/helm/values.yaml
+      custom_values: []
   approval:
     type: pending-approval
     stage: qa
-    title: Approve with failed tests
+    title: Approve anyway and delete deployment
     description: The pipeline will fail after ${{WAIT_ON_FAIL}} minutes
     timeout:
       timeUnit: minutes
@@ -536,21 +413,11 @@ steps:
       condition:
         all:
           error: '"${{FAILED}}" == "failed"'
-          wait_on_fail: '${{WAIT_ON_FAIL}}'
-  dummy_end:
-    title: Dummy step
-    description: Without this, the on_finish hook is executed before the approval
-      step
-    image: python:3.9.10
-    stage: qa
-    when:
-      condition:
-        all:
-          error: '"${{FAILED}}" == "failed"'
-          wait_on_fail: '${{WAIT_ON_FAIL}}'
-hooks:
-  on_finish:
+  delete_deployment:
+    title: Delete deployment
+    description: The deployment is deleted at the end of the pipeline
     image: codefresh/kubectl
+    stage: qa
     commands:
     - kubectl config use-context ${{CLUSTER_NAME}}
-    - kubectl delete ns test-${{NAMESPACE_BASENAME}}-${{CF_SHORT_REVISION}}
+    - kubectl delete ns test-${{NAMESPACE_BASENAME}}
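
Taken together, the tail of the pipeline is now linear: install the chart once, pause on a pending-approval gate (which, per the when condition above, appears to trigger only when an earlier step exported FAILED=failed, and times out after ${{WAIT_ON_FAIL}} minutes), then tear the test namespace down in an ordinary step instead of an on_finish hook, which also removes the need for the old dummy_end ordering workaround. A sketch of the resulting flow, assembled from the added and context lines (the timeout body between timeUnit and the when condition lies outside the hunks and is omitted):

  deployment:         # helm install of deployment/helm into test-${{NAMESPACE_BASENAME}}
  approval:           # pending-approval; fails the pipeline after ${{WAIT_ON_FAIL}} minutes
  delete_deployment:  # plain codefresh/kubectl step, qa stage
    commands:
    - kubectl config use-context ${{CLUSTER_NAME}}
    - kubectl delete ns test-${{NAMESPACE_BASENAME}}

Dropping the -${{CF_SHORT_REVISION}} suffix means consecutive runs, presumably scoped per branch through NAMESPACE_BASENAME, reuse and clean up a single test namespace rather than accumulating one namespace per commit.
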