From 34f8ec6a29af7a499ce6ab1aab4a484380637889 Mon Sep 17 00:00:00 2001
From: Filippo Ledda
Date: Sat, 20 Jan 2024 10:41:21 +0100
Subject: [PATCH 001/210] CH-110 jupyterhub update wip
---
.../deploy/resources/hub/jupyterhub_config.py | 148 +-
.../jupyterhub/deploy/resources/hub/z2jh.py | 21 +-
.../jupyterhub/deploy/templates/NOTES.txt | 158 +
.../deploy/templates/_helpers-auth-rework.tpl | 18 +-
.../deploy/templates/_helpers-names.tpl | 62 +-
.../deploy/templates/_helpers-netpol.tpl | 101 +
.../jupyterhub/deploy/templates/_helpers.tpl | 63 +-
.../deploy/templates/hub/configmap.yaml | 1 +
.../deploy/templates/hub/deployment.yaml | 24 +-
.../deploy/templates/hub/netpol.yaml | 25 +-
.../jupyterhub/deploy/templates/hub/pdb.yaml | 4 -
.../jupyterhub/deploy/templates/hub/rbac.yaml | 15 +-
.../deploy/templates/hub/serviceaccount.yaml | 12 +
.../deploy/templates/image-pull-secret.yaml | 15 +
.../image-puller/_helpers-daemonset.tpl | 51 +-
.../deploy/templates/image-puller/job.yaml | 13 +-
.../templates/image-puller/priorityclass.yaml | 18 +
.../deploy/templates/image-puller/rbac.yaml | 27 +-
.../image-puller/serviceaccount.yaml | 21 +
.../templates/proxy/autohttps/_README.txt | 9 -
.../templates/proxy/autohttps/configmap.yaml | 28 -
.../templates/proxy/autohttps/deployment.yaml | 141 -
.../templates/proxy/autohttps/rbac.yaml | 40 -
.../templates/proxy/autohttps/service.yaml | 25 -
.../deploy/templates/proxy/deployment.yaml | 14 +-
.../deploy/templates/proxy/netpol.yaml | 24 +-
.../deploy/templates/proxy/pdb.yaml | 4 -
.../deploy/templates/proxy/service.yaml | 9 +-
.../templates/scheduling/priorityclass.yaml | 13 -
.../scheduling/user-placeholder/pdb.yaml | 4 -
.../user-placeholder/priorityclass.yaml | 13 -
.../user-placeholder/statefulset.yaml | 15 +-
.../scheduling/user-scheduler/configmap.yaml | 20 +-
.../scheduling/user-scheduler/deployment.yaml | 31 +-
.../scheduling/user-scheduler/pdb.yaml | 4 -
.../scheduling/user-scheduler/rbac.yaml | 78 +-
.../user-scheduler/serviceaccount.yaml | 14 +
.../deploy/templates/singleuser/netpol.yaml | 39 +-
.../deploy/templates/singleuser/secret.yaml | 17 +
.../jupyterhub/deploy/values.schema.yaml | 3014 +++++++++++++++++
applications/jupyterhub/deploy/values.yaml | 276 +-
.../jupyterhub/zero-to-jupyterhub-k8s | 1 +
42 files changed, 4013 insertions(+), 617 deletions(-)
create mode 100644 applications/jupyterhub/deploy/templates/NOTES.txt
create mode 100644 applications/jupyterhub/deploy/templates/_helpers-netpol.tpl
create mode 100644 applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml
create mode 100644 applications/jupyterhub/deploy/templates/image-pull-secret.yaml
create mode 100644 applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml
create mode 100644 applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml
delete mode 100755 applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt
delete mode 100755 applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml
delete mode 100755 applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml
delete mode 100755 applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml
delete mode 100755 applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml
create mode 100644 applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml
create mode 100644 applications/jupyterhub/deploy/templates/singleuser/secret.yaml
create mode 100644 applications/jupyterhub/deploy/values.schema.yaml
create mode 160000 applications/jupyterhub/zero-to-jupyterhub-k8s
diff --git a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
index d4b3cee2..8ec801ee 100755
--- a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
+++ b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
@@ -1,9 +1,17 @@
+# load the config object (satisfies linters)
+c = get_config() # noqa
+
+import glob
import os
import re
import sys
-import logging
+from jupyterhub.utils import url_path_join
+from kubernetes_asyncio import client
from tornado.httpclient import AsyncHTTPClient
+
+# CLOUDHARNESS: EDIT START
+import logging
from kubernetes import client
from jupyterhub.utils import url_path_join
@@ -12,7 +20,7 @@
harness_hub() # activates harness hooks on jupyterhub
except Exception as e:
logging.error("could not import harness_jupyter", exc_info=True)
-
+# CLOUDHARNESS: EDIT END
# Make sure that modules placed in the same directory as the jupyterhub config are added to the pythonpath
configuration_directory = os.path.dirname(os.path.realpath(__file__))
@@ -20,39 +28,13 @@
from z2jh import (
get_config,
- set_config_if_not_none,
get_name,
get_name_env,
get_secret_value,
+ set_config_if_not_none,
)
-print('Base url is', c.JupyterHub.get('base_url', '/'))
-
-# Configure JupyterHub to use the curl backend for making HTTP requests,
-# rather than the pure-python implementations. The default one starts
-# being too slow to make a large number of requests to the proxy API
-# at the rate required.
-AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
-
-c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'
-
-# Connect to a proxy running in a different pod
-c.ConfigurableHTTPProxy.api_url = 'http://{}:{}'.format(os.environ['PROXY_API_SERVICE_HOST'], int(os.environ['PROXY_API_SERVICE_PORT']))
-c.ConfigurableHTTPProxy.should_start = False
-
-# Do not shut down user pods when hub is restarted
-c.JupyterHub.cleanup_servers = False
-
-# Check that the proxy has routes appropriately setup
-c.JupyterHub.last_activity_interval = 60
-
-# Don't wait at all before redirecting a spawning user to the progress page
-c.JupyterHub.tornado_settings = {
- 'slow_spawn_timeout': 0,
-}
-
-
def camelCaseify(s):
"""convert snake_case to camelCase
@@ -173,6 +155,7 @@ def camelCaseify(s):
("events_enabled", "events"),
("extra_labels", None),
("extra_annotations", None),
+ # ("allow_privilege_escalation", None), # Managed manually below
("uid", None),
("fs_gid", None),
("service_account", "serviceAccountName"),
@@ -206,10 +189,19 @@ def camelCaseify(s):
if image:
tag = get_config("singleuser.image.tag")
if tag:
- image = "{}:{}".format(image, tag)
+ image = f"{image}:{tag}"
c.KubeSpawner.image = image
+# allow_privilege_escalation defaults to False in KubeSpawner 2+. Since it's a
+# property where None, False, and True all are valid values that users of the
+# Helm chart may want to set, we can't use the set_config_if_not_none helper
+# function as someone may want to override the default False value to None.
+#
+c.KubeSpawner.allow_privilege_escalation = get_config(
+ "singleuser.allowPrivilegeEscalation"
+)
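+# (note: get_config returns None for an unset key, so the chart's values.yaml
+# is expected to carry the False default for this key)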
+
# Combine imagePullSecret.create (single), imagePullSecrets (list), and
# singleuser.image.pullSecrets (list).
image_pull_secrets = []
@@ -255,7 +247,7 @@ def camelCaseify(s):
pass
else:
raise ValueError(
- "Unrecognized value for matchNodePurpose: %r" % match_node_purpose
+ f"Unrecognized value for matchNodePurpose: {match_node_purpose}"
)
# Combine the common tolerations for user pods with singleuser tolerations
@@ -271,7 +263,7 @@ def camelCaseify(s):
pvc_name_template = get_config("singleuser.storage.dynamic.pvcNameTemplate")
c.KubeSpawner.pvc_name_template = pvc_name_template
volume_name_template = get_config("singleuser.storage.dynamic.volumeNameTemplate")
- c.KubeSpawner.storage_pvc_ensure = False
+ c.KubeSpawner.storage_pvc_ensure = True
set_config_if_not_none(
c.KubeSpawner, "storage_class", "singleuser.storage.dynamic.storageClass"
)
@@ -354,41 +346,62 @@ def camelCaseify(s):
)
c.JupyterHub.services = []
+c.JupyterHub.load_roles = []
+# jupyterhub-idle-culler's permissions are scoped to what it needs only, see
+# https://github.com/jupyterhub/jupyterhub-idle-culler#permissions.
+#
if get_config("cull.enabled", False):
+ jupyterhub_idle_culler_role = {
+ "name": "jupyterhub-idle-culler",
+ "scopes": [
+ "list:users",
+ "read:users:activity",
+ "read:servers",
+ "delete:servers",
+ # "admin:users", # dynamically added if --cull-users is passed
+ ],
+ # assign the role to a jupyterhub service, so it gains these permissions
+ "services": ["jupyterhub-idle-culler"],
+ }
+
cull_cmd = ["python3", "-m", "jupyterhub_idle_culler"]
base_url = c.JupyterHub.get("base_url", "/")
cull_cmd.append("--url=http://localhost:8081" + url_path_join(base_url, "hub/api"))
cull_timeout = get_config("cull.timeout")
if cull_timeout:
- cull_cmd.append("--timeout=%s" % cull_timeout)
+ cull_cmd.append(f"--timeout={cull_timeout}")
cull_every = get_config("cull.every")
if cull_every:
- cull_cmd.append("--cull-every=%s" % cull_every)
+ cull_cmd.append(f"--cull-every={cull_every}")
cull_concurrency = get_config("cull.concurrency")
if cull_concurrency:
- cull_cmd.append("--concurrency=%s" % cull_concurrency)
+ cull_cmd.append(f"--concurrency={cull_concurrency}")
if get_config("cull.users"):
cull_cmd.append("--cull-users")
+ jupyterhub_idle_culler_role["scopes"].append("admin:users")
+
+ if not get_config("cull.adminUsers"):
+ cull_cmd.append("--cull-admin-users=false")
if get_config("cull.removeNamedServers"):
cull_cmd.append("--remove-named-servers")
cull_max_age = get_config("cull.maxAge")
if cull_max_age:
- cull_cmd.append("--max-age=%s" % cull_max_age)
+ cull_cmd.append(f"--max-age={cull_max_age}")
c.JupyterHub.services.append(
{
- "name": "cull-idle",
- "admin": True,
+ "name": "jupyterhub-idle-culler",
"command": cull_cmd,
}
)
+ c.JupyterHub.load_roles.append(jupyterhub_idle_culler_role)
for key, service in get_config("hub.services", {}).items():
# c.JupyterHub.services is a list of dicts, but
@@ -402,26 +415,44 @@ def camelCaseify(s):
c.JupyterHub.services.append(service)
+for key, role in get_config("hub.loadRoles", {}).items():
+ # c.JupyterHub.load_roles is a list of dicts, but
+ # hub.loadRoles is a dict of dicts to make the config mergable
+ role.setdefault("name", key)
+
+ c.JupyterHub.load_roles.append(role)
+
+# respect explicit null command (distinct from unspecified)
+# this avoids relying on KubeSpawner.cmd's default being None
+_unspecified = object()
+specified_cmd = get_config("singleuser.cmd", _unspecified)
+if specified_cmd is not _unspecified:
+ c.Spawner.cmd = specified_cmd
-set_config_if_not_none(c.Spawner, "cmd", "singleuser.cmd")
set_config_if_not_none(c.Spawner, "default_url", "singleuser.defaultUrl")
-cloud_metadata = get_config("singleuser.cloudMetadata", {})
+cloud_metadata = get_config("singleuser.cloudMetadata")
if cloud_metadata.get("blockWithIptables") == True:
# Use iptables to block access to cloud metadata by default
network_tools_image_name = get_config("singleuser.networkTools.image.name")
network_tools_image_tag = get_config("singleuser.networkTools.image.tag")
+ network_tools_resources = get_config("singleuser.networkTools.resources")
+ ip = cloud_metadata["ip"]
ip_block_container = client.V1Container(
name="block-cloud-metadata",
image=f"{network_tools_image_name}:{network_tools_image_tag}",
command=[
"iptables",
- "-A",
+ "--append",
"OUTPUT",
- "-d",
- cloud_metadata.get("ip", "169.254.169.254"),
- "-j",
+ "--protocol",
+ "tcp",
+ "--destination",
+ ip,
+ "--destination-port",
+ "80",
+ "--jump",
"DROP",
],
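+        # (drops TCP/80 egress only; assumes the metadata service serves
+        # plain HTTP on port 80, as the common 169.254.169.254 endpoints do)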
security_context=client.V1SecurityContext(
@@ -429,6 +460,7 @@ def camelCaseify(s):
run_as_user=0,
capabilities=client.V1Capabilities(add=["NET_ADMIN"]),
),
+ resources=network_tools_resources,
)
c.KubeSpawner.init_containers.append(ip_block_container)
@@ -438,17 +470,6 @@ def camelCaseify(s):
c.JupyterHub.log_level = "DEBUG"
c.Spawner.debug = True
-# load /usr/local/etc/jupyterhub/jupyterhub_config.d config files
-config_dir = "/usr/local/etc/jupyterhub/jupyterhub_config.d"
-if os.path.isdir(config_dir):
- for file_path in sorted(glob.glob(f"{config_dir}/*.py")):
- file_name = os.path.basename(file_path)
- print(f"Loading {config_dir} config: {file_name}")
- with open(file_path) as f:
- file_content = f.read()
- # compiling makes debugging easier: https://stackoverflow.com/a/437857
- exec(compile(source=file_content, filename=file_name, mode="exec"))
-
# load potentially seeded secrets
#
# NOTE: ConfigurableHTTPProxy.auth_token is set through an environment variable
@@ -471,11 +492,23 @@ def camelCaseify(s):
cfg.pop("keys", None)
c[app].update(cfg)
+# load /usr/local/etc/jupyterhub/jupyterhub_config.d config files
+config_dir = "/usr/local/etc/jupyterhub/jupyterhub_config.d"
+if os.path.isdir(config_dir):
+ for file_path in sorted(glob.glob(f"{config_dir}/*.py")):
+ file_name = os.path.basename(file_path)
+ print(f"Loading {config_dir} config: {file_name}")
+ with open(file_path) as f:
+ file_content = f.read()
+ # compiling makes debugging easier: https://stackoverflow.com/a/437857
+ exec(compile(source=file_content, filename=file_name, mode="exec"))
+
# execute hub.extraConfig entries
for key, config_py in sorted(get_config("hub.extraConfig", {}).items()):
- print("Loading extra config: %s" % key)
+ print(f"Loading extra config: {key}")
exec(config_py)
+# CLOUDHARNESS: EDIT START
# Allow switching authenticators easily
auth_type = get_config('hub.config.JupyterHub.authenticator_class')
email_domain = 'local'
@@ -525,4 +558,5 @@ def camelCaseify(s):
c.apps = get_config('apps')
c.registry = get_config('registry')
c.domain = get_config('root.domain')
-c.namespace = get_config('root.namespace')
\ No newline at end of file
+c.namespace = get_config('root.namespace')
+# CLOUDHARNESS: EDIT END
\ No newline at end of file
diff --git a/applications/jupyterhub/deploy/resources/hub/z2jh.py b/applications/jupyterhub/deploy/resources/hub/z2jh.py
index 834a6b6c..fc368f64 100755
--- a/applications/jupyterhub/deploy/resources/hub/z2jh.py
+++ b/applications/jupyterhub/deploy/resources/hub/z2jh.py
@@ -3,15 +3,15 @@
Methods here can be imported by extraConfig in values.yaml
"""
-from collections import Mapping
-from functools import lru_cache
import os
import re
+from collections.abc import Mapping
+from functools import lru_cache
import yaml
+
# memoize so we only load config once
-@lru_cache()
+@lru_cache
def _load_config():
"""Load the Helm chart configuration used to render the Helm templates of
the chart from a mounted k8s Secret, and merge in values from an optionally
@@ -27,6 +27,7 @@ def _load_config():
cfg = _merge_dictionaries(cfg, values)
else:
print(f"No config at {path}")
+ # EDIT: CLOUDHARNESS START
path = f"/opt/cloudharness/resources/allvalues.yaml"
if os.path.exists(path):
print("Loading global CloudHarness config at", path)
@@ -34,11 +35,11 @@ def _load_config():
values = yaml.safe_load(f)
cfg = _merge_dictionaries(cfg, values)
cfg['root'] = values
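+            # (exposes the full CloudHarness values under "root", so lookups
+            # like get_config("root.domain") can reach global settings)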
-
+ # EDIT: CLOUDHARNESS END
return cfg
-@lru_cache()
+@lru_cache
def _get_config_value(key):
"""Load value from the k8s ConfigMap given a key."""
@@ -50,7 +51,7 @@ def _get_config_value(key):
raise Exception(f"{path} not found!")
-@lru_cache()
+@lru_cache
def get_secret_value(key, default="never-explicitly-set"):
"""Load value from the user managed k8s Secret or the default k8s Secret
given a key."""
@@ -117,7 +118,7 @@ def get_config(key, default=None):
else:
value = value[level]
-
+ # EDIT: CLOUDHARNESS START
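+    # Resolve "{{ ... }}" placeholders embedded in string values by looking up
+    # the referenced variable in the merged config (CloudHarness templating).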
if value and isinstance(value, str):
replace_var = re.search("{{.*?}}", value)
if replace_var:
@@ -128,6 +129,7 @@ def get_config(key, default=None):
if repl:
print("replace", variable, "in", value, ":", repl)
value = re.sub("{{.*?}}", repl, value)
+ # EDIT: CLOUDHARNESS END
return value
@@ -137,6 +139,5 @@ def set_config_if_not_none(cparent, name, key):
configuration item if not None
"""
data = get_config(key)
-
if data is not None:
- setattr(cparent, name, data)
\ No newline at end of file
+ setattr(cparent, name, data)
diff --git a/applications/jupyterhub/deploy/templates/NOTES.txt b/applications/jupyterhub/deploy/templates/NOTES.txt
new file mode 100644
index 00000000..9769a9c7
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/NOTES.txt
@@ -0,0 +1,158 @@
+{{- $proxy_service := include "jupyterhub.proxy-public.fullname" . -}}
+
+{{- /* Generated with https://patorjk.com/software/taag/#p=display&h=0&f=Slant&t=JupyterHub */}}
+.      __                          __                  __  __          __
+      / / __  __ ____   __  __   / /_ ___   _____    / / / / __  __   / /_
+ __  / / / / / // __ \ / / / /  / __// _ \ / ___/   / /_/ / / / / /  / __ \
+/ /_/ / / /_/ // /_/ // /_/ /  / /_ /  __// /      / __  / / /_/ /  / /_/ /
+\____/  \__,_// .___/ \__, /   \__/ \___//_/      /_/ /_/  \__,_/  /_.___/
+             /_/     /____/
+
+ You have successfully installed the official JupyterHub Helm chart!
+
+### Installation info
+
+ - Kubernetes namespace: {{ .Release.Namespace }}
+ - Helm release name: {{ .Release.Name }}
+ - Helm chart version: {{ .Chart.Version }}
+ - JupyterHub version: {{ .Chart.AppVersion }}
+ - Hub pod packages: See https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/{{ include "jupyterhub.chart-version-to-git-ref" .Chart.Version }}/images/hub/requirements.txt
+
+### Followup links
+
+ - Documentation: https://z2jh.jupyter.org
+ - Help forum: https://discourse.jupyter.org
+ - Social chat: https://gitter.im/jupyterhub/jupyterhub
+ - Issue tracking: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues
+
+### Post-installation checklist
+
+ - Verify that created Pods enter a Running state:
+
+ kubectl --namespace={{ .Release.Namespace }} get pod
+
+ If a pod is stuck with a Pending or ContainerCreating status, diagnose with:
+
+ kubectl --namespace={{ .Release.Namespace }} describe pod
+
+ If a pod keeps restarting, diagnose with:
+
+ kubectl --namespace={{ .Release.Namespace }} logs --previous
+ {{- println }}
+
+ {{- if eq .Values.apps.jupyterhub.proxy.service.type "LoadBalancer" }}
+ - Verify an external IP is provided for the k8s Service {{ $proxy_service }}.
+
+ kubectl --namespace={{ .Release.Namespace }} get service {{ $proxy_service }}
+
+    If the external ip remains <pending>, diagnose with:
+
+ kubectl --namespace={{ .Release.Namespace }} describe service {{ $proxy_service }}
+ {{- end }}
+
+ - Verify web based access:
+ {{- println }}
+ {{- if .Values.apps.jupyterhub.ingress.enabled }}
+ {{- range $host := .Values.apps.jupyterhub.ingress.hosts }}
+ Try insecure HTTP access: http://{{ $host }}{{ $.Values.apps.jupyterhub.hub.baseUrl | trimSuffix "/" }}/
+ {{- end }}
+
+ {{- range $tls := .Values.apps.jupyterhub.ingress.tls }}
+ {{- range $host := $tls.hosts }}
+ Try secure HTTPS access: https://{{ $host }}{{ $.Values.apps.jupyterhub.hub.baseUrl | trimSuffix "/" }}/
+ {{- end }}
+ {{- end }}
+ {{- else }}
+ You have not configured a k8s Ingress resource so you need to access the k8s
+ Service {{ $proxy_service }} directly.
+ {{- println }}
+
+ {{- if eq .Values.apps.jupyterhub.proxy.service.type "NodePort" }}
+ The k8s Service {{ $proxy_service }} is exposed via NodePorts. That means
+ that all the k8s cluster's nodes are exposing the k8s Service via those
+ ports.
+
+  Try insecure HTTP access: http://<node-ip>:{{ .Values.apps.jupyterhub.proxy.service.nodePorts.http | default "no-http-nodeport-set"}}
+  Try secure HTTPS access: https://<node-ip>:{{ .Values.apps.jupyterhub.proxy.service.nodePorts.https | default "no-https-nodeport-set" }}
+
+ {{- else }}
+ If your computer is outside the k8s cluster, you can port-forward traffic to
+ the k8s Service {{ $proxy_service }} with kubectl to access it from your
+ computer.
+
+ kubectl --namespace={{ .Release.Namespace }} port-forward service/{{ $proxy_service }} 8080:http
+
+ Try insecure HTTP access: http://localhost:8080
+ {{- end }}
+ {{- end }}
+ {{- println }}
+
+
+
+
+
+{{- /*
+ Warnings for likely misconfigurations
+*/}}
+
+{{- if and (not .Values.apps.jupyterhub.scheduling.podPriority.enabled) (and .Values.apps.jupyterhub.scheduling.userPlaceholder.enabled .Values.apps.jupyterhub.scheduling.userPlaceholder.replicas) }}
+#################################################################################
+###### WARNING: You are using user placeholders without pod priority #####
+###### enabled*, either enable pod priority or stop using the #####
+###### user placeholders** to avoid having placeholders that #####
+###### refuse to make room for a real user. #####
+###### #####
+###### *scheduling.podPriority.enabled #####
+###### **scheduling.userPlaceholder.enabled #####
+###### **scheduling.userPlaceholder.replicas #####
+#################################################################################
+{{- println }}
+{{- end }}
+
+
+
+
+
+{{- /*
+ Breaking changes and failures for likely misconfigurations.
+*/}}
+
+{{- $breaking := "" }}
+{{- $breaking_title := "\n" }}
+{{- $breaking_title = print $breaking_title "\n#################################################################################" }}
+{{- $breaking_title = print $breaking_title "\n###### BREAKING: The config values passed contained no longer accepted #####" }}
+{{- $breaking_title = print $breaking_title "\n###### options. See the messages below for more details. #####" }}
+{{- $breaking_title = print $breaking_title "\n###### #####" }}
+{{- $breaking_title = print $breaking_title "\n###### To verify your updated config is accepted, you can use #####" }}
+{{- $breaking_title = print $breaking_title "\n###### the `helm template` command. #####" }}
+{{- $breaking_title = print $breaking_title "\n#################################################################################" }}
+
+
+{{- /*
+ This is an example (in a helm template comment) on how to detect and
+ communicate with regards to a breaking chart config change.
+
+ {{- if hasKey .Values.apps.jupyterhub.singleuser.cloudMetadata "enabled" }}
+ {{- $breaking = print $breaking "\n\nCHANGED: singleuser.cloudMetadata.enabled must as of 1.0.0 be configured using singleuser.cloudMetadata.blockWithIptables with the opposite value." }}
+ {{- end }}
+*/}}
+
+
+{{- if hasKey .Values.apps.jupyterhub.rbac "enabled" }}
+{{- $breaking = print $breaking "\n\nCHANGED: rbac.enabled must as of version 2.0.0 be configured via rbac.create and <component>.serviceAccount.create." }}
+{{- end }}
+
+
+{{- if hasKey .Values.apps.jupyterhub.hub "fsGid" }}
+{{- $breaking = print $breaking "\n\nCHANGED: hub.fsGid must as of version 2.0.0 be configured via hub.podSecurityContext.fsGroup." }}
+{{- end }}
+
+
+{{- if and .Values.apps.jupyterhub.singleuser.cloudMetadata.blockWithIptables (and .Values.apps.jupyterhub.singleuser.networkPolicy.enabled .Values.apps.jupyterhub.singleuser.networkPolicy.egressAllowRules.cloudMetadataServer) }}
+{{- $breaking = print $breaking "\n\nCHANGED: singleuser.cloudMetadata.blockWithIptables must as of version 3.0.0 not be configured together with singleuser.networkPolicy.egressAllowRules.cloudMetadataServer as it leads to an ambiguous configuration." }}
+{{- end }}
+
+
+{{- if $breaking }}
+{{- fail (print $breaking_title $breaking "\n\n") }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl b/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
index b742a126..3159d103 100644
--- a/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
+++ b/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
@@ -168,30 +168,30 @@ ldap.dn.user.useLookupName: LDAPAuthenticator.use_lookup_dn_username
{{- $c := dict }}
{{- $result := (dict "hub" (dict "config" $c)) }}
{{- /*
- Flattens the config in .Values.apps.jupyterhub.auth to a format of
+ Flattens the config in .Values.apps.jupyterhub.apps.jupyterhub.auth to a format of
"keyX.keyY...": "value". Writes output to $c.
*/}}
- {{- include "jupyterhub.flattenDict" (list $c (omit .Values.apps.jupyterhub.auth "type" "custom")) }}
+ {{- include "jupyterhub.flattenDict" (list $c (omit .Values.apps.jupyterhub.apps.jupyterhub.auth "type" "custom")) }}
{{- /*
Transform the flattened config using a dictionary
representing the old z2jh config, output the result
in $c.
*/}}
- {{- include "jupyterhub.authDep.remapOldToNew.mappable" (list $c .Values.apps.jupyterhub.global.safeToShowValues) }}
+  {{- include "jupyterhub.authDep.remapOldToNew.mappable" (list $c .Values.apps.jupyterhub.apps.jupyterhub.global.safeToShowValues) }}
- {{- $class_old_config_key := .Values.apps.jupyterhub.auth.type | default "" }} {{- /* ldap - github */}}
+ {{- $class_old_config_key := .Values.apps.jupyterhub.apps.jupyterhub.auth.type | default "" }} {{- /* ldap - github */}}
{{- $class_new_entrypoint := "" }} {{- /* ldapauthenticator.LDAPAuthenticator - github */}}
{{- $class_new_config_key := "" }} {{- /* LDAPAuthenticator - GitHubOAuthenticator */}}
{{- /* SET $class_new_entrypoint, $class_new_config_key */}}
{{- if eq $class_old_config_key "custom" }}
- {{- $class_new_entrypoint = .Values.apps.jupyterhub.auth.custom.className | default "custom.className wasn't configured!" }}
+ {{- $class_new_entrypoint = .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.className | default "custom.className wasn't configured!" }}
{{- $class_new_config_key = $class_new_entrypoint | splitList "." | last }}
{{- /* UPDATE c dict explicitly with auth.custom.config */}}
- {{- if .Values.apps.jupyterhub.auth.custom.config }}
- {{- $custom_config := merge (dict) .Values.apps.jupyterhub.auth.custom.config }}
- {{- if not .Values.apps.jupyterhub.global.safeToShowValues }}
+ {{- if .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.config }}
+ {{- $custom_config := merge (dict) .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.config }}
+    {{- if not .Values.apps.jupyterhub.apps.jupyterhub.global.safeToShowValues }}
{{- range $key, $val := $custom_config }}
{{- $_ := set $custom_config $key "***" }}
{{- end }}
@@ -213,7 +213,7 @@ The JupyterHub Helm chart's auth config has been reworked and requires changes.
The new way to configure authentication in chart version 0.11.0+ is printed
below for your convenience. The values are not shown by default to ensure no
-secrets are exposed, run helm upgrade with --set global.safeToShowValues=true
+secrets are exposed, run helm upgrade with --set apps.jupyterhub.global.safeToShowValues=true
to show them.
{{ $result | toYaml }}
diff --git a/applications/jupyterhub/deploy/templates/_helpers-names.tpl b/applications/jupyterhub/deploy/templates/_helpers-names.tpl
index e9cf7bb6..401d601a 100644
--- a/applications/jupyterhub/deploy/templates/_helpers-names.tpl
+++ b/applications/jupyterhub/deploy/templates/_helpers-names.tpl
@@ -3,8 +3,8 @@
parent charts to reference these dynamic resource names.
To avoid duplicating documentation, for more information, please see the
- fullnameOverride entry in schema.yaml or the configuration reference that
- schema.yaml renders to.
+ fullnameOverride entry in values.schema.yaml or the configuration reference
+ that values.schema.yaml renders to.
https://z2jh.jupyter.org/en/latest/resources/reference.html#fullnameOverride
*/}}
@@ -38,8 +38,8 @@
{{- $name_override := .Values.apps.jupyterhub.nameOverride }}
{{- if ne .Chart.Name "jupyterhub" }}
{{- if .Values.apps.jupyterhub.jupyterhub }}
- {{- $fullname_override = .Values.apps.jupyterhub.fullnameOverride }}
- {{- $name_override = .Values.apps.jupyterhub.nameOverride }}
+ {{- $fullname_override = .Values.apps.jupyterhub.jupyterhub.fullnameOverride }}
+ {{- $name_override = .Values.apps.jupyterhub.jupyterhub.nameOverride }}
{{- end }}
{{- end }}
@@ -76,12 +76,23 @@
{{- include "jupyterhub.fullname.dash" . }}hub
{{- end }}
+{{- /* hub-serviceaccount ServiceAccount */}}
+{{- define "jupyterhub.hub-serviceaccount.fullname" -}}
+ {{- if .Values.apps.jupyterhub.hub.serviceAccount.create }}
+ {{- .Values.apps.jupyterhub.hub.serviceAccount.name | default (include "jupyterhub.hub.fullname" .) }}
+ {{- else }}
+ {{- .Values.apps.jupyterhub.hub.serviceAccount.name | default "default" }}
+ {{- end }}
+{{- end }}
+
{{- /* hub-existing-secret Secret */}}
{{- define "jupyterhub.hub-existing-secret.fullname" -}}
{{- /* A hack to avoid issues from invoking this from a parent Helm chart. */}}
{{- $existing_secret := .Values.apps.jupyterhub.hub.existingSecret }}
{{- if ne .Chart.Name "jupyterhub" }}
- {{- $existing_secret = .Values.apps.jupyterhub.hub.existingSecret }}
+ {{- if .Values.apps.jupyterhub.jupyterhub }}
+ {{- $existing_secret = .Values.apps.jupyterhub.jupyterhub.hub.existingSecret }}
+ {{- end }}
{{- end }}
{{- if $existing_secret }}
{{- $existing_secret }}
@@ -133,11 +144,29 @@
{{- include "jupyterhub.fullname.dash" . }}autohttps
{{- end }}
+{{- /* autohttps-serviceaccount ServiceAccount */}}
+{{- define "jupyterhub.autohttps-serviceaccount.fullname" -}}
+ {{- if .Values.apps.jupyterhub.proxy.traefik.serviceAccount.create }}
+ {{- .Values.apps.jupyterhub.proxy.traefik.serviceAccount.name | default (include "jupyterhub.autohttps.fullname" .) }}
+ {{- else }}
+ {{- .Values.apps.jupyterhub.proxy.traefik.serviceAccount.name | default "default" }}
+ {{- end }}
+{{- end }}
+
{{- /* user-scheduler Deployment */}}
{{- define "jupyterhub.user-scheduler-deploy.fullname" -}}
{{- include "jupyterhub.fullname.dash" . }}user-scheduler
{{- end }}
+{{- /* user-scheduler-serviceaccount ServiceAccount */}}
+{{- define "jupyterhub.user-scheduler-serviceaccount.fullname" -}}
+ {{- if .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.create }}
+ {{- .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.name | default (include "jupyterhub.user-scheduler-deploy.fullname" .) }}
+ {{- else }}
+ {{- .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.name | default "default" }}
+ {{- end }}
+{{- end }}
+
{{- /* user-scheduler leader election lock resource */}}
{{- define "jupyterhub.user-scheduler-lock.fullname" -}}
{{- include "jupyterhub.user-scheduler-deploy.fullname" . }}-lock
@@ -153,6 +182,15 @@
{{- include "jupyterhub.fullname.dash" . }}hook-image-awaiter
{{- end }}
+{{- /* image-awaiter-serviceaccount ServiceAccount */}}
+{{- define "jupyterhub.hook-image-awaiter-serviceaccount.fullname" -}}
+ {{- if .Values.apps.jupyterhub.prePuller.hook.serviceAccount.create }}
+ {{- .Values.apps.jupyterhub.prePuller.hook.serviceAccount.name | default (include "jupyterhub.hook-image-awaiter.fullname" .) }}
+ {{- else }}
+ {{- .Values.apps.jupyterhub.prePuller.hook.serviceAccount.name | default "default" }}
+ {{- end }}
+{{- end }}
+
{{- /* hook-image-puller DaemonSet */}}
{{- define "jupyterhub.hook-image-puller.fullname" -}}
{{- include "jupyterhub.fullname.dash" . }}hook-image-puller
@@ -210,6 +248,15 @@
{{- end }}
{{- end }}
+{{- /* image-puller Priority */}}
+{{- define "jupyterhub.image-puller-priority.fullname" -}}
+ {{- if (include "jupyterhub.fullname" .) }}
+ {{- include "jupyterhub.fullname.dash" . }}image-puller
+ {{- else }}
+ {{- .Release.Name }}-image-puller-priority
+ {{- end }}
+{{- end }}
+
{{- /* user-scheduler's registered name */}}
{{- define "jupyterhub.user-scheduler.fullname" -}}
{{- if (include "jupyterhub.fullname" .) }}
@@ -231,6 +278,7 @@
fullname: {{ include "jupyterhub.fullname" . | quote }}
fullname-dash: {{ include "jupyterhub.fullname.dash" . | quote }}
hub: {{ include "jupyterhub.hub.fullname" . | quote }}
+hub-serviceaccount: {{ include "jupyterhub.hub-serviceaccount.fullname" . | quote }}
hub-existing-secret: {{ include "jupyterhub.hub-existing-secret.fullname" . | quote }}
hub-existing-secret-or-default: {{ include "jupyterhub.hub-existing-secret-or-default.fullname" . | quote }}
hub-pvc: {{ include "jupyterhub.hub-pvc.fullname" . | quote }}
@@ -241,10 +289,14 @@ proxy-public: {{ include "jupyterhub.proxy-public.fullname" . | quote }}
proxy-public-tls: {{ include "jupyterhub.proxy-public-tls.fullname" . | quote }}
proxy-public-manual-tls: {{ include "jupyterhub.proxy-public-manual-tls.fullname" . | quote }}
autohttps: {{ include "jupyterhub.autohttps.fullname" . | quote }}
+autohttps-serviceaccount: {{ include "jupyterhub.autohttps-serviceaccount.fullname" . | quote }}
user-scheduler-deploy: {{ include "jupyterhub.user-scheduler-deploy.fullname" . | quote }}
+user-scheduler-serviceaccount: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . | quote }}
user-scheduler-lock: {{ include "jupyterhub.user-scheduler-lock.fullname" . | quote }}
user-placeholder: {{ include "jupyterhub.user-placeholder.fullname" . | quote }}
+image-puller-priority: {{ include "jupyterhub.image-puller-priority.fullname" . | quote }}
hook-image-awaiter: {{ include "jupyterhub.hook-image-awaiter.fullname" . | quote }}
+hook-image-awaiter-serviceaccount: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . | quote }}
hook-image-puller: {{ include "jupyterhub.hook-image-puller.fullname" . | quote }}
continuous-image-puller: {{ include "jupyterhub.continuous-image-puller.fullname" . | quote }}
singleuser: {{ include "jupyterhub.singleuser.fullname" . | quote }}
diff --git a/applications/jupyterhub/deploy/templates/_helpers-netpol.tpl b/applications/jupyterhub/deploy/templates/_helpers-netpol.tpl
new file mode 100644
index 00000000..4075569e
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/_helpers-netpol.tpl
@@ -0,0 +1,101 @@
+{{- /*
+ This named template renders egress rules for NetworkPolicy resources based on
+ common configuration.
+
+  It renders based on the `egressAllowRules` and `egress` keys of the passed
+  networkPolicy config object. Each flag set to true under `egressAllowRules`
+  is rendered to an egress rule placed next to any custom user-defined rules
+  from the `egress` config.
+
+ This named template needs to render based on a specific networkPolicy
+ resource, but also needs access to the root context. Due to that, it
+ accepts a list as its scope, where the first element is supposed to be the
+ root context and the second element is supposed to be the networkPolicy
+ configuration object.
+
+ As an example, this is how you would render this named template from a
+ NetworkPolicy resource under its egress:
+
+ egress:
+ # other rules here...
+
+ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.hub.networkPolicy)) }}
+ {{- . | nindent 4 }}
+ {{- end }}
+
+ Note that the reference to privateIPs and nonPrivateIPs relate to
+ https://en.wikipedia.org/wiki/Private_network#Private_IPv4_addresses.
+*/}}
+
+{{- define "jupyterhub.networkPolicy.renderEgressRules" -}}
+{{- $root := index . 0 }}
+{{- $netpol := index . 1 }}
+{{- if or (or $netpol.egressAllowRules.dnsPortsCloudMetadataServer $netpol.egressAllowRules.dnsPortsKubeSystemNamespace) $netpol.egressAllowRules.dnsPortsPrivateIPs }}
+- ports:
+ - port: 53
+ protocol: UDP
+ - port: 53
+ protocol: TCP
+ to:
+ {{- if $netpol.egressAllowRules.dnsPortsCloudMetadataServer }}
+ # Allow outbound connections to DNS ports on the cloud metadata server
+ - ipBlock:
+ cidr: {{ $root.Values.apps.jupyterhub.singleuser.cloudMetadata.ip }}/32
+ {{- end }}
+ {{- if $netpol.egressAllowRules.dnsPortsKubeSystemNamespace }}
+ # Allow outbound connections to DNS ports on pods in the kube-system
+ # namespace
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: kube-system
+ {{- end }}
+ {{- if $netpol.egressAllowRules.dnsPortsPrivateIPs }}
+ # Allow outbound connections to DNS ports on destinations in the private IP
+ # ranges
+ - ipBlock:
+ cidr: 10.0.0.0/8
+ - ipBlock:
+ cidr: 172.16.0.0/12
+ - ipBlock:
+ cidr: 192.168.0.0/16
+ {{- end }}
+{{- end }}
+
+{{- if $netpol.egressAllowRules.nonPrivateIPs }}
+# Allow outbound connections to non-private IP ranges
+- to:
+ - ipBlock:
+ cidr: 0.0.0.0/0
+ except:
+ # As part of this rule:
+ # - don't allow outbound connections to private IPs
+ - 10.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
+ # - don't allow outbound connections to the cloud metadata server
+ - {{ $root.Values.apps.jupyterhub.singleuser.cloudMetadata.ip }}/32
+{{- end }}
+
+{{- if $netpol.egressAllowRules.privateIPs }}
+# Allow outbound connections to private IP ranges
+- to:
+ - ipBlock:
+ cidr: 10.0.0.0/8
+ - ipBlock:
+ cidr: 172.16.0.0/12
+ - ipBlock:
+ cidr: 192.168.0.0/16
+{{- end }}
+
+{{- if $netpol.egressAllowRules.cloudMetadataServer }}
+# Allow outbound connections to the cloud metadata server
+- to:
+ - ipBlock:
+ cidr: {{ $root.Values.apps.jupyterhub.singleuser.cloudMetadata.ip }}/32
+{{- end }}
+
+{{- with $netpol.egress }}
+# Allow outbound connections based on user specified rules
+{{ . | toYaml }}
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/_helpers.tpl b/applications/jupyterhub/deploy/templates/_helpers.tpl
index efea86d1..a2023639 100755
--- a/applications/jupyterhub/deploy/templates/_helpers.tpl
+++ b/applications/jupyterhub/deploy/templates/_helpers.tpl
@@ -12,7 +12,7 @@
When you ask a helper to render its content, one often forwards the current
scope to the helper in order to allow it to access .Release.Name,
- .Values.apps.jupyterhub.rbac.enabled and similar values.
+ .Values.apps.jupyterhub.rbac.create and similar values.
#### Example - Passing the current scope
{{ include "jupyterhub.commonLabels" . }}
@@ -180,8 +180,51 @@ component: {{ include "jupyterhub.componentLabel" . }}
Augments passed .pullSecrets with $.Values.apps.jupyterhub.imagePullSecrets
*/}}
{{- define "jupyterhub.imagePullSecrets" -}}
+ {{- /*
+ We have implemented a trick to allow a parent chart depending on this
+  chart to call these named templates.
+
+ Caveats and notes:
+
+ 1. While parent charts can reference these, grandparent charts can't.
+ 2. Parent charts must not use an alias for this chart.
+ 3. There is no failsafe workaround to above due to
+ https://github.com/helm/helm/issues/9214.
+ 4. .Chart is of its own type (*chart.Metadata) and needs to be casted
+ using "toYaml | fromYaml" in order to be able to use normal helm
+ template functions on it.
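+
+       As an illustrative sketch, a parent chart's helper would first do
+       something like `$chart := .Chart | toYaml | fromYaml` before calling
+       dict functions such as hasKey on it.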
+ */}}
+  {{- $jupyterhub_values := .root.Values.apps.jupyterhub }}
+ {{- if ne .root.Chart.Name "jupyterhub" }}
+ {{- if .root.Values.apps.jupyterhub.jupyterhub }}
+ {{- $jupyterhub_values = .root.Values.apps.jupyterhub.jupyterhub }}
+ {{- end }}
+ {{- end }}
+ {{- /* Populate $_.list with all relevant entries */}}
+ {{- $_ := dict "list" (concat .image.pullSecrets $jupyterhub_values.imagePullSecrets | uniq) }}
+ {{- if and $jupyterhub_values.imagePullSecret.create $jupyterhub_values.imagePullSecret.automaticReferenceInjection }}
+ {{- $__ := set $_ "list" (append $_.list (include "jupyterhub.image-pull-secret.fullname" .root) | uniq) }}
+ {{- end }}
+ {{- /* Decide if something should be written */}}
+ {{- if not (eq ($_.list | toJson) "[]") }}
+
+ {{- /* Process the $_.list where strings become dicts with a name key and the
+ strings become the name keys' values into $_.res */}}
+ {{- $_ := set $_ "res" list }}
+ {{- range $_.list }}
+ {{- if eq (typeOf .) "string" }}
+ {{- $__ := set $_ "res" (append $_.res (dict "name" .)) }}
+ {{- else }}
+ {{- $__ := set $_ "res" (append $_.res .) }}
+ {{- end }}
+ {{- end }}
+
+ {{- /* Write the results */}}
+ {{- $_.res | toJson }}
+
+ {{- end }}
{{- end }}
{{- /*
@@ -339,3 +382,21 @@ limits:
{{- print "\n\nextraFiles entries (" $file_key ") must only contain one of the fields: 'data', 'stringData', and 'binaryData'." | fail }}
{{- end }}
{{- end }}
+
+{{- /*
+ jupyterhub.chart-version-to-git-ref:
+ Renders a valid git reference from a chartpress generated version string.
+ In practice, either a git tag or a git commit hash will be returned.
+
+ - The version string will follow a chartpress pattern, see
+ https://github.com/jupyterhub/chartpress#examples-chart-versions-and-image-tags.
+
+ - The regexReplaceAll function is a sprig library function, see
+ https://masterminds.github.io/sprig/strings.html.
+
+ - The regular expression is in golang syntax, but \d had to become \\d for
+ example.
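+
+  - As a hypothetical example, a chartpress version such as
+    "3.0.0-n142.h375f5f9e" renders to "375f5f9e", while a plain tag such as
+    "3.0.0" does not match the pattern and is returned unchanged.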
+*/}}
+{{- define "jupyterhub.chart-version-to-git-ref" -}}
+{{- regexReplaceAll ".*[.-]n\\d+[.]h(.*)" . "${1}" }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/hub/configmap.yaml b/applications/jupyterhub/deploy/templates/hub/configmap.yaml
index c913f678..f52feb6a 100755
--- a/applications/jupyterhub/deploy/templates/hub/configmap.yaml
+++ b/applications/jupyterhub/deploy/templates/hub/configmap.yaml
@@ -29,5 +29,6 @@ data:
*/}}
checksum_hook-image-puller: {{ include "jupyterhub.imagePuller.daemonset.hook.checksum" . | quote }}
+ # EDIT: CLOUDHARNESS
allvalues.yaml: |
{{- .Values | toYaml | nindent 4 }}
\ No newline at end of file
diff --git a/applications/jupyterhub/deploy/templates/hub/deployment.yaml b/applications/jupyterhub/deploy/templates/hub/deployment.yaml
index 82132c62..d105ecca 100755
--- a/applications/jupyterhub/deploy/templates/hub/deployment.yaml
+++ b/applications/jupyterhub/deploy/templates/hub/deployment.yaml
@@ -5,6 +5,9 @@ metadata:
labels:
{{- include "jupyterhub.labels" . | nindent 4 }}
spec:
+ {{- if typeIs "int" .Values.apps.jupyterhub.hub.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.apps.jupyterhub.hub.revisionHistoryLimit }}
+ {{- end }}
replicas: 1
selector:
matchLabels:
@@ -30,11 +33,14 @@ spec:
{{- . | toYaml | nindent 8 }}
{{- end }}
spec:
-{{ include "deploy_utils.etcHosts" . | indent 6 }}
+{{ include "deploy_utils.etcHosts" . | indent 6 }} # EDIT: CLOUDHARNESS
{{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
{{- end }}
- nodeSelector: {{ toJson .Values.apps.jupyterhub.hub.nodeSelector }}
+ {{- with .Values.apps.jupyterhub.hub.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
{{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.hub.tolerations }}
tolerations:
{{- . | toYaml | nindent 8 }}
@@ -44,7 +50,7 @@ spec:
- name: config
configMap:
name: {{ include "jupyterhub.hub.fullname" . }}
- {{- /* This is needed by cloudharness libraries */}}
+ {{- /* EDIT: CLOUDHARNESS This is needed by cloudharness libraries */}}
- name: cloudharness-allvalues
configMap:
name: cloudharness-allvalues
@@ -82,11 +88,13 @@ spec:
persistentVolumeClaim:
claimName: {{ include "jupyterhub.hub-pvc.fullname" . }}
{{- end }}
- {{- if .Values.apps.jupyterhub.rbac.enabled }}
- serviceAccountName: {{ include "jupyterhub.hub.fullname" . }}
+ {{- with include "jupyterhub.hub-serviceaccount.fullname" . }}
+ serviceAccountName: {{ . }}
{{- end }}
+ {{- with .Values.apps.jupyterhub.hub.podSecurityContext }}
securityContext:
- fsGroup: {{ .Values.apps.jupyterhub.hub.fsGid }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
{{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.apps.jupyterhub.hub.image) }}
imagePullSecrets: {{ . }}
{{- end }}
@@ -153,14 +161,14 @@ spec:
name: config
- mountPath: /usr/local/etc/jupyterhub/secret/
name: secret
- - name: cloudharness-allvalues
+ - name: cloudharness-allvalues # EDIT: CLOUDHARNESS START
mountPath: /opt/cloudharness/resources/allvalues.yaml
subPath: allvalues.yaml
{{- if .Values.apps.accounts }}
- name: cloudharness-kc-accounts
mountPath: /opt/cloudharness/resources/auth
readOnly: true
- {{- end }}
+ {{- end }} # EDIT: CLOUDHARNESS END
{{- if (include "jupyterhub.hub-existing-secret.fullname" .) }}
- mountPath: /usr/local/etc/jupyterhub/existing-secret/
name: existing-secret
diff --git a/applications/jupyterhub/deploy/templates/hub/netpol.yaml b/applications/jupyterhub/deploy/templates/hub/netpol.yaml
index 9a7a6bc1..d9508e20 100755
--- a/applications/jupyterhub/deploy/templates/hub/netpol.yaml
+++ b/applications/jupyterhub/deploy/templates/hub/netpol.yaml
@@ -61,31 +61,24 @@ spec:
egress:
# hub --> proxy
- - ports:
- - port: 8001
- to:
+ - to:
- podSelector:
matchLabels:
{{- $_ := merge (dict "componentLabel" "proxy") . }}
{{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8001
+
# hub --> singleuser-server
- - ports:
- - port: 8888
- to:
+ - to:
- podSelector:
matchLabels:
{{- $_ := merge (dict "componentLabel" "singleuser-server") . }}
{{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8888
- # hub --> Kubernetes internal DNS
- - ports:
- - protocol: UDP
- port: 53
- - protocol: TCP
- port: 53
-
- {{- with .Values.apps.jupyterhub.hub.networkPolicy.egress }}
- # hub --> depends, but the default is everything
- {{- . | toYaml | nindent 4 }}
+ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.hub.networkPolicy)) }}
+ {{- . | nindent 4 }}
{{- end }}
{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/hub/pdb.yaml b/applications/jupyterhub/deploy/templates/hub/pdb.yaml
index 855609d4..bb6c7b16 100755
--- a/applications/jupyterhub/deploy/templates/hub/pdb.yaml
+++ b/applications/jupyterhub/deploy/templates/hub/pdb.yaml
@@ -1,9 +1,5 @@
{{- if .Values.apps.jupyterhub.hub.pdb.enabled -}}
-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
-{{- else }}
-apiVersion: policy/v1beta1
-{{- end }}
kind: PodDisruptionBudget
metadata:
name: {{ include "jupyterhub.hub.fullname" . }}
diff --git a/applications/jupyterhub/deploy/templates/hub/rbac.yaml b/applications/jupyterhub/deploy/templates/hub/rbac.yaml
index 738daab1..1b689af4 100755
--- a/applications/jupyterhub/deploy/templates/hub/rbac.yaml
+++ b/applications/jupyterhub/deploy/templates/hub/rbac.yaml
@@ -1,15 +1,4 @@
-{{- if .Values.apps.jupyterhub.rbac.enabled -}}
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ include "jupyterhub.hub.fullname" . }}
- {{- with .Values.apps.jupyterhub.hub.serviceAccount.annotations }}
- annotations:
- {{- . | toYaml | nindent 4 }}
- {{- end }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
----
+{{- if .Values.apps.jupyterhub.rbac.create -}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@@ -32,7 +21,7 @@ metadata:
{{- include "jupyterhub.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
- name: {{ include "jupyterhub.hub.fullname" . }}
+ name: {{ include "jupyterhub.hub-serviceaccount.fullname" . }}
namespace: "{{ .Release.Namespace }}"
roleRef:
kind: Role
diff --git a/applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml b/applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml
new file mode 100644
index 00000000..817ed661
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.apps.jupyterhub.hub.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "jupyterhub.hub-serviceaccount.fullname" . }}
+ {{- with .Values.apps.jupyterhub.hub.serviceAccount.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/image-pull-secret.yaml b/applications/jupyterhub/deploy/templates/image-pull-secret.yaml
new file mode 100644
index 00000000..b7544db7
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/image-pull-secret.yaml
@@ -0,0 +1,15 @@
+{{- if .Values.apps.jupyterhub.imagePullSecret.create }}
+kind: Secret
+apiVersion: v1
+metadata:
+ name: {{ include "jupyterhub.image-pull-secret.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation
+ "helm.sh/hook-weight": "-20"
+type: kubernetes.io/dockerconfigjson
+data:
+ .dockerconfigjson: {{ include "jupyterhub.dockerconfigjson" . }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl b/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
index e16fd1a9..528345c0 100644
--- a/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
+++ b/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
@@ -34,6 +34,9 @@ spec:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 100%
+ {{- if typeIs "int" .Values.apps.jupyterhub.prePuller.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.apps.jupyterhub.prePuller.revisionHistoryLimit }}
+ {{- end }}
template:
metadata:
labels:
@@ -44,13 +47,17 @@ spec:
{{- end }}
spec:
{{- /*
- continuous-image-puller pods are made evictable to save on the k8s pods
- per node limit all k8s clusters have.
+        image-puller pods are made evictable to save on the k8s pods-per-node
+        limit all k8s clusters have, and are given a higher priority than the
+        user-placeholder pods that could otherwise block an entire node.
*/}}
- {{- if and (not .hook) .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
- priorityClassName: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
+ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.image-puller-priority.fullname" . }}
+ {{- end }}
+ {{- with .Values.apps.jupyterhub.singleuser.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
{{- end }}
- nodeSelector: {{ toJson .Values.apps.jupyterhub.singleuser.nodeSelector }}
{{- with concat .Values.apps.jupyterhub.scheduling.userPods.tolerations .Values.apps.jupyterhub.singleuser.extraTolerations .Values.apps.jupyterhub.prePuller.extraTolerations }}
tolerations:
{{- . | toYaml | nindent 8 }}
@@ -127,6 +134,7 @@ spec:
{{- /* --- Conditionally pull profileList images --- */}}
{{- if .Values.apps.jupyterhub.prePuller.pullProfileListImages }}
{{- range $k, $container := .Values.apps.jupyterhub.singleuser.profileList }}
+ {{- /* profile's kubespawner_override */}}
{{- if $container.kubespawner_override }}
{{- if $container.kubespawner_override.image }}
- name: image-pull-singleuser-profilelist-{{ $k }}
@@ -145,13 +153,15 @@ spec:
{{- end }}
{{- end }}
{{- end }}
- {{- end }}
- {{- end }}
-
- {{- /* --- Pull extra images --- */}}
- {{- range $k, $v := .Values.apps.jupyterhub.prePuller.extraImages }}
- - name: image-pull-{{ $k }}
- image: {{ $v.name }}:{{ $v.tag }}
+ {{- /* kubespawner_override in profile's profile_options */}}
+ {{- if $container.profile_options }}
+ {{- range $option, $option_spec := $container.profile_options }}
+ {{- if $option_spec.choices }}
+ {{- range $choice, $choice_spec := $option_spec.choices }}
+ {{- if $choice_spec.kubespawner_override }}
+ {{- if $choice_spec.kubespawner_override.image }}
+ - name: image-pull-profile-{{ $k }}-option-{{ $option }}-{{ $choice }}
+ image: {{ $choice_spec.kubespawner_override.image }}
command:
- /bin/sh
- -c
@@ -163,13 +173,20 @@ spec:
{{- with $.Values.apps.jupyterhub.prePuller.containerSecurityContext }}
securityContext:
{{- . | toYaml | nindent 12 }}
- {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
{{- end }}
- {{- /* --- Pull CloudHarness tasks images --- */}}
- {{- range $k, $v := ( index .Values "task-images" ) }}
- - name: image-pull-{{ $k | replace "-" "" }}
- image: {{ $v }}
+ {{- /* --- Pull extra images --- */}}
+ {{- range $k, $v := .Values.apps.jupyterhub.prePuller.extraImages }}
+ - name: image-pull-{{ $k }}
+ image: {{ $v.name }}:{{ $v.tag }}
command:
- /bin/sh
- -c
diff --git a/applications/jupyterhub/deploy/templates/image-puller/job.yaml b/applications/jupyterhub/deploy/templates/image-puller/job.yaml
index bdd9f63c..cc6db3ec 100755
--- a/applications/jupyterhub/deploy/templates/image-puller/job.yaml
+++ b/applications/jupyterhub/deploy/templates/image-puller/job.yaml
@@ -28,16 +28,22 @@ spec:
labels:
{{- /* Changes here will cause the Job to restart the pods. */}}
{{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ {{- with .Values.apps.jupyterhub.prePuller.labels }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
{{- with .Values.apps.jupyterhub.prePuller.annotations }}
annotations:
{{- . | toYaml | nindent 8 }}
{{- end }}
spec:
restartPolicy: Never
- {{- if .Values.apps.jupyterhub.rbac.enabled }}
- serviceAccountName: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
+ {{- with include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
+ serviceAccountName: {{ . }}
+ {{- end }}
+ {{- with .Values.apps.jupyterhub.prePuller.hook.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
{{- end }}
- nodeSelector: {{ toJson .Values.apps.jupyterhub.prePuller.hook.nodeSelector }}
{{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.prePuller.hook.tolerations }}
tolerations:
{{- . | toYaml | nindent 8 }}
@@ -58,6 +64,7 @@ spec:
- -api-server-address=https://kubernetes.default.svc:$(KUBERNETES_SERVICE_PORT)
- -namespace={{ .Release.Namespace }}
- -daemonset={{ include "jupyterhub.hook-image-puller.fullname" . }}
+ - -pod-scheduling-wait-duration={{ .Values.apps.jupyterhub.prePuller.hook.podSchedulingWaitDuration }}
{{- with .Values.apps.jupyterhub.prePuller.hook.containerSecurityContext }}
securityContext:
{{- . | toYaml | nindent 12 }}
diff --git a/applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml b/applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml
new file mode 100644
index 00000000..1a3fca33
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+{{- if or .Values.apps.jupyterhub.prePuller.hook.enabled .Values.apps.jupyterhub.prePuller.continuous.enabled -}}
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+ name: {{ include "jupyterhub.image-puller-priority.fullname" . }}
+ annotations:
+ meta.helm.sh/release-name: "{{ .Release.Name }}"
+ meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+value: {{ .Values.apps.jupyterhub.scheduling.podPriority.imagePullerPriority }}
+globalDefault: false
+description: >-
+ Enables [hook|continuous]-image-puller pods to fit on nodes even though they
+ are clogged by user-placeholder pods, while not evicting normal user pods.
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml b/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
index 95c86ddf..5946896b 100755
--- a/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
+++ b/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
@@ -1,29 +1,8 @@
{{- /*
Permissions to be used by the hook-image-awaiter job
*/}}
-{{- if .Values.apps.jupyterhub.rbac.enabled }}
-{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) }}
-{{- /*
-This service account...
-*/ -}}
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
- hub.jupyter.org/deletable: "true"
- annotations:
- "helm.sh/hook": pre-install,pre-upgrade
- "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
- "helm.sh/hook-weight": "0"
- {{- with .Values.apps.jupyterhub.prePuller.hook.serviceAccount.annotations }}
- {{- . | toYaml | nindent 4 }}
- {{- end }}
----
-{{- /*
-... will be used by this role...
-*/}}
+{{- if .Values.apps.jupyterhub.rbac.create -}}
+{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@@ -56,7 +35,7 @@ metadata:
"helm.sh/hook-weight": "0"
subjects:
- kind: ServiceAccount
- name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
+ name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
namespace: "{{ .Release.Namespace }}"
roleRef:
kind: Role
diff --git a/applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml b/applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml
new file mode 100644
index 00000000..2e5fa728
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml
@@ -0,0 +1,21 @@
+{{- /*
+ServiceAccount for the pre-puller hook's image-awaiter-job
+*/}}
+{{- if .Values.apps.jupyterhub.prePuller.hook.serviceAccount.create -}}
+{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ hub.jupyter.org/deletable: "true"
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ "helm.sh/hook-weight": "0"
+ {{- with .Values.apps.jupyterhub.prePuller.hook.serviceAccount.annotations }}
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt b/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt
deleted file mode 100755
index 08bd7bba..00000000
--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# Automatic HTTPS Terminator
-
-This directory has Kubernetes objects for automatic Let's Encrypt Support.
-When enabled, we create a new deployment object that has an nginx-ingress
-and kube-lego container in it. This is responsible for requesting,
-storing and renewing certificates as needed from Let's Encrypt.
-
-The only change required outside of this directory is in the `proxy-public`
-service, which targets different hubs based on automatic HTTPS status.
\ No newline at end of file
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml
deleted file mode 100755
index 8d71a971..00000000
--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
-{{- if $autoHTTPS -}}
-{{- $_ := .Values.apps.jupyterhub.proxy.https.letsencrypt.contactEmail | required "proxy.https.letsencrypt.contactEmail is a required field" -}}
-
-# This configmap contains Traefik configuration files to be mounted.
-# - traefik.yaml will only be read during startup (static configuration)
-# - dynamic.yaml will be read on change (dynamic configuration)
-#
-# ref: https://docs.traefik.io/getting-started/configuration-overview/
-#
-# The configuration files are first rendered with Helm templating to large YAML
-# strings. Then we use the fromYAML function on these strings to get an object,
-# that we in turn merge with user provided extra configuration.
-#
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
-data:
- traefik.yaml: |
- {{- include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.apps.jupyterhub.proxy.traefik.extraStaticConfig | toYaml | nindent 4 }}
- dynamic.yaml: |
- {{- include "jupyterhub.dynamic.yaml" . | fromYaml | merge .Values.apps.jupyterhub.proxy.traefik.extraDynamicConfig | toYaml | nindent 4 }}
-
-{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml
deleted file mode 100755
index fcb062fd..00000000
--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml
+++ /dev/null
@@ -1,141 +0,0 @@
-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
-{{- if $autoHTTPS -}}
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
-spec:
- replicas: 1
- selector:
- matchLabels:
- {{- include "jupyterhub.matchLabels" . | nindent 6 }}
- template:
- metadata:
- labels:
- {{- include "jupyterhub.matchLabels" . | nindent 8 }}
- hub.jupyter.org/network-access-proxy-http: "true"
- {{- with .Values.apps.jupyterhub.proxy.traefik.labels }}
- {{- . | toYaml | nindent 8 }}
- {{- end }}
- annotations:
- # Only force a restart through a change to this checksum when the static
- # configuration is changed, as the dynamic can be updated after start.
- # Any disruptions to this deployment impacts everything, it is the
- # entrypoint of all network traffic.
- checksum/static-config: {{ include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.apps.jupyterhub.proxy.traefik.extraStaticConfig | toYaml | sha256sum }}
- spec:
- {{- if .Values.apps.jupyterhub.rbac.enabled }}
- serviceAccountName: {{ include "jupyterhub.autohttps.fullname" . }}
- {{- end }}
- {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
- priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
- {{- end }}
- nodeSelector: {{ toJson .Values.apps.jupyterhub.proxy.traefik.nodeSelector }}
- {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.proxy.traefik.tolerations }}
- tolerations:
- {{- . | toYaml | nindent 8 }}
- {{- end }}
- {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
- volumes:
- - name: certificates
- emptyDir: {}
- - name: traefik-config
- configMap:
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- {{- with .Values.apps.jupyterhub.proxy.traefik.extraVolumes }}
- {{- . | toYaml | nindent 8 }}
- {{- end }}
- {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.apps.jupyterhub.proxy.traefik.image) }}
- imagePullSecrets: {{ . }}
- {{- end }}
- initContainers:
- - name: load-acme
- image: "{{ .Values.apps.jupyterhub.proxy.secretSync.image.name }}:{{ .Values.apps.jupyterhub.proxy.secretSync.image.tag }}"
- {{- with .Values.apps.jupyterhub.proxy.secretSync.image.pullPolicy }}
- imagePullPolicy: {{ . }}
- {{- end }}
- args:
- - load
- - {{ include "jupyterhub.proxy-public-tls.fullname" . }}
- - acme.json
- - /etc/acme/acme.json
- env:
- # We need this to get logs immediately
- - name: PYTHONUNBUFFERED
- value: "True"
- {{- with .Values.apps.jupyterhub.proxy.traefik.extraEnv }}
- {{- include "jupyterhub.extraEnv" . | nindent 12 }}
- {{- end }}
- volumeMounts:
- - name: certificates
- mountPath: /etc/acme
- {{- with .Values.apps.jupyterhub.proxy.secretSync.containerSecurityContext }}
- securityContext:
- {{- . | toYaml | nindent 12 }}
- {{- end }}
- containers:
- - name: traefik
- image: "{{ .Values.apps.jupyterhub.proxy.traefik.image.name }}:{{ .Values.apps.jupyterhub.proxy.traefik.image.tag }}"
- {{- with .Values.apps.jupyterhub.proxy.traefik.image.pullPolicy }}
- imagePullPolicy: {{ . }}
- {{- end }}
- {{- with .Values.apps.jupyterhub.proxy.traefik.resources }}
- resources:
- {{- . | toYaml | nindent 12 }}
- {{- end }}
- ports:
- - name: http
- containerPort: 8080
- - name: https
- containerPort: 8443
- {{- with .Values.apps.jupyterhub.proxy.traefik.extraPorts }}
- {{- . | toYaml | nindent 12 }}
- {{- end }}
- volumeMounts:
- - name: traefik-config
- mountPath: /etc/traefik
- - name: certificates
- mountPath: /etc/acme
- {{- with .Values.apps.jupyterhub.proxy.traefik.extraVolumeMounts }}
- {{- . | toYaml | nindent 12 }}
- {{- end }}
- {{- with .Values.apps.jupyterhub.proxy.traefik.extraEnv }}
- env:
- {{- include "jupyterhub.extraEnv" . | nindent 12 }}
- {{- end }}
- {{- with .Values.apps.jupyterhub.proxy.traefik.containerSecurityContext }}
- securityContext:
- {{- . | toYaml | nindent 12 }}
- {{- end }}
- - name: secret-sync
- image: "{{ .Values.apps.jupyterhub.proxy.secretSync.image.name }}:{{ .Values.apps.jupyterhub.proxy.secretSync.image.tag }}"
- {{- with .Values.apps.jupyterhub.proxy.secretSync.image.pullPolicy }}
- imagePullPolicy: {{ . }}
- {{- end }}
- args:
- - watch-save
- - --label=app={{ include "jupyterhub.appLabel" . }}
- - --label=release={{ .Release.Name }}
- - --label=chart={{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
- - --label=heritage=secret-sync
- - {{ include "jupyterhub.proxy-public-tls.fullname" . }}
- - acme.json
- - /etc/acme/acme.json
- env:
- # We need this to get logs immediately
- - name: PYTHONUNBUFFERED
- value: "True"
- volumeMounts:
- - name: certificates
- mountPath: /etc/acme
- {{- with .Values.apps.jupyterhub.proxy.secretSync.containerSecurityContext }}
- securityContext:
- {{- . | toYaml | nindent 12 }}
- {{- end }}
- {{- with .Values.apps.jupyterhub.proxy.traefik.extraPodSpec }}
- {{- . | toYaml | nindent 6 }}
- {{- end }}
-{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml
deleted file mode 100755
index ea43b672..00000000
--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
-{{- if (and $autoHTTPS .Values.apps.jupyterhub.rbac.enabled) -}}
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
- {{- with .Values.apps.jupyterhub.proxy.traefik.serviceAccount.annotations }}
- annotations:
- {{- . | toYaml | nindent 4 }}
- {{- end }}
-rules:
-- apiGroups: [""]
- resources: ["secrets"]
- verbs: ["get", "patch", "list", "create"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
-subjects:
-- kind: ServiceAccount
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- apiGroup:
-roleRef:
- kind: Role
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- apiGroup: rbac.authorization.k8s.io
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
-{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml
deleted file mode 100755
index d57c135d..00000000
--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
-{{- if $autoHTTPS -}}
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ include "jupyterhub.proxy-http.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
- {{- with .Values.apps.jupyterhub.proxy.service.labels }}
- {{- . | toYaml | nindent 4 }}
- {{- end }}
- {{- with .Values.apps.jupyterhub.proxy.service.annotations }}
- annotations:
- {{- . | toYaml | nindent 4 }}
- {{- end }}
-spec:
- type: ClusterIP
- selector:
- {{- $_ := merge (dict "componentLabel" "proxy") . }}
- {{- include "jupyterhub.matchLabels" $_ | nindent 4 }}
- ports:
- - port: 8000
- targetPort: http
-{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/deployment.yaml b/applications/jupyterhub/deploy/templates/proxy/deployment.yaml
index 6d63ba88..bb37b8f0 100755
--- a/applications/jupyterhub/deploy/templates/proxy/deployment.yaml
+++ b/applications/jupyterhub/deploy/templates/proxy/deployment.yaml
@@ -7,6 +7,9 @@ metadata:
labels:
{{- include "jupyterhub.labels" . | nindent 4 }}
spec:
+ {{- if typeIs "int" .Values.apps.jupyterhub.proxy.chp.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.apps.jupyterhub.proxy.chp.revisionHistoryLimit }}
+ {{- end }}
replicas: 1
selector:
matchLabels:
@@ -35,7 +38,7 @@ spec:
# match the k8s Secret during the first upgrade following an auth_token
# was generated.
checksum/auth-token: {{ include "jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token" . | sha256sum | trunc 4 | quote }}
- checksum/proxy-secret: {{ include (print $.Template.BasePath "/jupyterhub/hub/secret.yaml") . | sha256sum }}
+ checksum/proxy-secret: {{ include (print $.Template.BasePath "/jupyterhub/proxy/secret.yaml") . | sha256sum | quote }}
{{- with .Values.apps.jupyterhub.proxy.annotations }}
{{- . | toYaml | nindent 8 }}
{{- end }}
@@ -44,7 +47,10 @@ spec:
{{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
{{- end }}
- nodeSelector: {{ toJson .Values.apps.jupyterhub.proxy.chp.nodeSelector }}
+ {{- with .Values.apps.jupyterhub.proxy.chp.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
{{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.proxy.chp.tolerations }}
tolerations:
{{- . | toYaml | nindent 8 }}
@@ -135,6 +141,8 @@ spec:
livenessProbe:
initialDelaySeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.failureThreshold }}
httpGet:
path: /_chp_healthz
{{- if or $manualHTTPS $manualHTTPSwithsecret }}
@@ -149,6 +157,8 @@ spec:
readinessProbe:
initialDelaySeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.failureThreshold }}
httpGet:
path: /_chp_healthz
{{- if or $manualHTTPS $manualHTTPSwithsecret }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/netpol.yaml b/applications/jupyterhub/deploy/templates/proxy/netpol.yaml
index adc82773..88a00be6 100755
--- a/applications/jupyterhub/deploy/templates/proxy/netpol.yaml
+++ b/applications/jupyterhub/deploy/templates/proxy/netpol.yaml
@@ -85,32 +85,24 @@ spec:
egress:
# proxy --> hub
- - ports:
- - port: 8081
- to:
+ - to:
- podSelector:
matchLabels:
{{- $_ := merge (dict "componentLabel" "hub") . }}
{{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8081
# proxy --> singleuser-server
- - ports:
- - port: 8888
- to:
+ - to:
- podSelector:
matchLabels:
{{- $_ := merge (dict "componentLabel" "singleuser-server") . }}
{{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8888
- # proxy --> Kubernetes internal DNS
- - ports:
- - protocol: UDP
- port: 53
- - protocol: TCP
- port: 53
-
- {{- with .Values.apps.jupyterhub.proxy.chp.networkPolicy.egress }}
- # proxy --> depends, but the default is everything
- {{- . | toYaml | nindent 4 }}
+ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.proxy.chp.networkPolicy)) }}
+ {{- . | nindent 4 }}
{{- end }}
{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/pdb.yaml b/applications/jupyterhub/deploy/templates/proxy/pdb.yaml
index 1846a3b0..155895b0 100755
--- a/applications/jupyterhub/deploy/templates/proxy/pdb.yaml
+++ b/applications/jupyterhub/deploy/templates/proxy/pdb.yaml
@@ -1,9 +1,5 @@
{{- if .Values.apps.jupyterhub.proxy.chp.pdb.enabled -}}
-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
-{{- else }}
-apiVersion: policy/v1beta1
-{{- end }}
kind: PodDisruptionBudget
metadata:
name: {{ include "jupyterhub.proxy.fullname" . }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/service.yaml b/applications/jupyterhub/deploy/templates/proxy/service.yaml
index 0d9ca5b2..f634ba9e 100755
--- a/applications/jupyterhub/deploy/templates/proxy/service.yaml
+++ b/applications/jupyterhub/deploy/templates/proxy/service.yaml
@@ -35,12 +35,15 @@ metadata:
{{- end }}
spec:
selector:
+ # This service will target the autohttps pod if autohttps is configured, and
+ # the proxy pod if not. When autohttps is configured, the service proxy-http
+ # will be around to target the proxy pod directly.
{{- if $autoHTTPS }}
- component: autohttps
+ {{- $_ := merge (dict "componentLabel" "autohttps") . -}}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 4 }}
{{- else }}
- component: proxy
+ {{- include "jupyterhub.matchLabels" . | nindent 4 }}
{{- end }}
- release: {{ .Release.Name }}
ports:
{{- if $HTTPS }}
- name: https
diff --git a/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml b/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
index 588cf196..1bed905e 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
@@ -4,22 +4,9 @@ kind: PriorityClass
metadata:
name: {{ include "jupyterhub.priority.fullname" . }}
annotations:
- # FIXME: PriorityClasses must be added before the other resources reference
- # them, and in the past a workaround was needed to accomplish this:
- # to make the resource a Helm hook.
- #
- # To transition this resource to no longer be a Helm hook resource,
- # we explicitly add ownership annotations/labels (in 1.0.0) which
- # will allow a future upgrade (in 2.0.0) to remove all hook and
- # ownership annotations/labels.
- #
- helm.sh/hook: pre-install,pre-upgrade
- helm.sh/hook-delete-policy: before-hook-creation
- helm.sh/hook-weight: "-100"
meta.helm.sh/release-name: "{{ .Release.Name }}"
meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
labels:
- app.kubernetes.io/managed-by: Helm
{{- $_ := merge (dict "componentLabel" "default-priority") . }}
{{- include "jupyterhub.labels" $_ | nindent 4 }}
value: {{ .Values.apps.jupyterhub.scheduling.podPriority.defaultPriority }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
index b1dc6c5d..800ac208 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
@@ -3,11 +3,7 @@ The cluster autoscaler should be allowed to evict and reschedule these pods if
it would help in order to scale down a node.
*/}}
{{- if .Values.apps.jupyterhub.scheduling.userPlaceholder.enabled -}}
-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
-{{- else }}
-apiVersion: policy/v1beta1
-{{- end }}
kind: PodDisruptionBudget
metadata:
name: {{ include "jupyterhub.user-placeholder.fullname" . }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
index e03497db..688e217c 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
@@ -5,22 +5,9 @@ kind: PriorityClass
metadata:
name: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
annotations:
- # FIXME: PriorityClasses must be added before the other resources reference
- # them, and in the past a workaround was needed to accomplish this:
- # to make the resource a Helm hook.
- #
- # To transition this resource to no longer be a Helm hook resource,
- # we explicitly add ownership annotations/labels (in 1.0.0) which
- # will allow a future upgrade (in 2.0.0) to remove all hook and
- # ownership annotations/labels.
- #
- helm.sh/hook: pre-install,pre-upgrade
- helm.sh/hook-delete-policy: before-hook-creation
- helm.sh/hook-weight: "-100"
meta.helm.sh/release-name: "{{ .Release.Name }}"
meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
labels:
- app.kubernetes.io/managed-by: Helm
{{- include "jupyterhub.labels" . | nindent 4 }}
value: {{ .Values.apps.jupyterhub.scheduling.podPriority.userPlaceholderPriority }}
globalDefault: false
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
index 114f6262..c243beee 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
@@ -16,6 +16,9 @@ metadata:
{{- include "jupyterhub.labels" . | nindent 4 }}
spec:
podManagementPolicy: Parallel
+ {{- if typeIs "int" .Values.apps.jupyterhub.scheduling.userPlaceholder.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.apps.jupyterhub.scheduling.userPlaceholder.revisionHistoryLimit }}
+ {{- end }}
replicas: {{ .Values.apps.jupyterhub.scheduling.userPlaceholder.replicas }}
selector:
matchLabels:
@@ -23,9 +26,16 @@ spec:
serviceName: {{ include "jupyterhub.user-placeholder.fullname" . }}
template:
metadata:
+ {{- with .Values.apps.jupyterhub.scheduling.userPlaceholder.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
labels:
{{- /* Changes here will cause the Deployment to restart the pods. */}}
{{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ {{- with .Values.apps.jupyterhub.scheduling.userPlaceholder.labels }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
spec:
{{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
priorityClassName: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
@@ -33,7 +43,10 @@ spec:
{{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled }}
schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
{{- end }}
- nodeSelector: {{ toJson .Values.apps.jupyterhub.singleuser.nodeSelector }}
+ {{- with .Values.apps.jupyterhub.singleuser.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
{{- with concat .Values.apps.jupyterhub.scheduling.userPods.tolerations .Values.apps.jupyterhub.singleuser.extraTolerations }}
tolerations:
{{- . | toYaml | nindent 8 }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
index ef8a37f6..3e83b444 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
@@ -6,16 +6,28 @@ metadata:
labels:
{{- include "jupyterhub.labels" . | nindent 4 }}
data:
- # ref: https://kubernetes.io/docs/reference/scheduling/config/
+ {{- /*
+ This is configuration of a k8s official kube-scheduler binary running in the
+ user-scheduler.
+
+ ref: https://kubernetes.io/docs/reference/scheduling/config/
+ ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1/
+ */}}
config.yaml: |
- apiVersion: kubescheduler.config.k8s.io/v1beta1
+ apiVersion: kubescheduler.config.k8s.io/v1
kind: KubeSchedulerConfiguration
leaderElection:
- resourceLock: endpoints
+ resourceLock: leases
resourceName: {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
resourceNamespace: "{{ .Release.Namespace }}"
profiles:
- schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
+ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.plugins }}
plugins:
- {{- .Values.apps.jupyterhub.scheduling.userScheduler.plugins | toYaml | nindent 10 }}
+ {{- . | toYaml | nindent 10 }}
+ {{- end }}
+ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.pluginConfig }}
+ pluginConfig:
+ {{- . | toYaml | nindent 10 }}
+ {{- end }}
{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
index 1bcaf317..f22d0de8 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
@@ -6,6 +6,9 @@ metadata:
labels:
{{- include "jupyterhub.labels" . | nindent 4 }}
spec:
+ {{- if typeIs "int" .Values.apps.jupyterhub.scheduling.userScheduler.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.apps.jupyterhub.scheduling.userScheduler.revisionHistoryLimit }}
+ {{- end }}
replicas: {{ .Values.apps.jupyterhub.scheduling.userScheduler.replicas }}
selector:
matchLabels:
@@ -14,16 +17,25 @@ spec:
metadata:
labels:
{{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.labels }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
annotations:
checksum/config-map: {{ include (print $.Template.BasePath "/jupyterhub/scheduling/user-scheduler/configmap.yaml") . | sha256sum }}
+ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.annotations }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
spec:
- {{- if .Values.apps.jupyterhub.rbac.enabled }}
- serviceAccountName: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
+      {{- with include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
+ serviceAccountName: {{ . }}
{{- end }}
{{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
{{- end }}
- nodeSelector: {{ toJson .Values.apps.jupyterhub.scheduling.userScheduler.nodeSelector }}
+ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
{{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.scheduling.userScheduler.tolerations }}
tolerations:
{{- . | toYaml | nindent 8 }}
@@ -44,13 +56,6 @@ spec:
{{- end }}
command:
- /usr/local/bin/kube-scheduler
- # NOTE: --leader-elect-... (new) and --lock-object-... (deprecated)
- # flags are silently ignored in favor of whats defined in the
- # passed KubeSchedulerConfiguration whenever --config is
- # passed.
- #
- # ref: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/
- #
# NOTE: --authentication-skip-lookup=true is used to avoid a
# seemingly harmless error, if we need to not skip
# "authentication lookup" in the future, see the linked issue.
@@ -65,12 +70,14 @@ spec:
livenessProbe:
httpGet:
path: /healthz
- port: 10251
+ scheme: HTTPS
+ port: 10259
initialDelaySeconds: 15
readinessProbe:
httpGet:
path: /healthz
- port: 10251
+ scheme: HTTPS
+ port: 10259
{{- with .Values.apps.jupyterhub.scheduling.userScheduler.resources }}
resources:
{{- . | toYaml | nindent 12 }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
index 04f2af8c..2c9c6de8 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
@@ -1,9 +1,5 @@
{{- if and .Values.apps.jupyterhub.scheduling.userScheduler.enabled .Values.apps.jupyterhub.scheduling.userScheduler.pdb.enabled -}}
-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
-{{- else }}
-apiVersion: policy/v1beta1
-{{- end }}
kind: PodDisruptionBudget
metadata:
name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
index 083e0654..9c7fab73 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
@@ -1,16 +1,5 @@
{{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled -}}
-{{- if .Values.apps.jupyterhub.rbac.enabled }}
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
- {{- with .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.annotations }}
- annotations:
- {{- . | toYaml | nindent 4 }}
- {{- end }}
----
+{{- if .Values.apps.jupyterhub.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@@ -19,13 +8,23 @@ metadata:
{{- include "jupyterhub.labels" . | nindent 4 }}
rules:
# Copied from the system:kube-scheduler ClusterRole of the k8s version
- # matching the kube-scheduler binary we use. A modification of two resource
- # name references from kube-scheduler to user-scheduler-lock was made.
+ # matching the kube-scheduler binary we use. A modification has been made to
+ # resourceName fields to remain relevant for how we have named our resources
+ # in this Helm chart.
#
- # NOTE: These rules have been unchanged between 1.12 and 1.15, then changed in
- # 1.16 and in 1.17, but unchanged in 1.18 and 1.19.
+ # NOTE: These rules have been:
+ # - unchanged between 1.12 and 1.15
+ # - changed in 1.16
+ # - changed in 1.17
+ # - unchanged between 1.18 and 1.20
+ # - changed in 1.21: get/list/watch permission for namespace,
+ # csidrivers, csistoragecapacities was added.
+ # - unchanged between 1.22 and 1.27
+ # - changed in 1.28: permissions to get/update lock endpoint resource
+ # removed
+ # - unchanged between 1.28 and 1.29
#
- # ref: https://github.com/kubernetes/kubernetes/blob/v1.19.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L696-L829
+ # ref: https://github.com/kubernetes/kubernetes/blob/v1.29.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L721-L862
- apiGroups:
- ""
- events.k8s.io
@@ -50,21 +49,6 @@ rules:
verbs:
- get
- update
- - apiGroups:
- - ""
- resources:
- - endpoints
- verbs:
- - create
- - apiGroups:
- - ""
- resourceNames:
- - {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
- resources:
- - endpoints
- verbs:
- - get
- - update
- apiGroups:
- ""
resources:
@@ -159,13 +143,37 @@ rules:
- get
- list
- watch
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csidrivers
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csistoragecapacities
+ verbs:
+ - get
+ - list
+ - watch
# Copied from the system:volume-scheduler ClusterRole of the k8s version
# matching the kube-scheduler binary we use.
#
- # NOTE: These rules have not changed between 1.12 and 1.19.
+ # NOTE: These rules have not changed between 1.12 and 1.29.
#
- # ref: https://github.com/kubernetes/kubernetes/blob/v1.19.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1213-L1240
+ # ref: https://github.com/kubernetes/kubernetes/blob/v1.29.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1283-L1310
- apiGroups:
- ""
resources:
@@ -203,7 +211,7 @@ metadata:
{{- include "jupyterhub.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
- name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
+ name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
namespace: "{{ .Release.Namespace }}"
roleRef:
kind: ClusterRole
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml
new file mode 100644
index 00000000..67618b03
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled -}}
+{{- if .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml b/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
index 3dfb1378..931a150f 100755
--- a/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
+++ b/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
@@ -62,23 +62,38 @@ spec:
egress:
# singleuser-server --> hub
- - ports:
- - port: 8081
- to:
+ - to:
- podSelector:
matchLabels:
{{- $_ := merge (dict "componentLabel" "hub") . }}
{{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8081
- # singleuser-server --> Kubernetes internal DNS
- - ports:
- - protocol: UDP
- port: 53
- - protocol: TCP
- port: 53
+ # singleuser-server --> proxy
+ # singleuser-server --> autohttps
+ #
+    # While not critical for core functionality, user or library code may rely
+    # on communicating with the proxy or autohttps pods via a k8s Service it
+    # can detect from well-known environment variables.
+ #
+ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "proxy") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8000
+ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "autohttps") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8080
+ - port: 8443
- {{- with .Values.apps.jupyterhub.singleuser.networkPolicy.egress }}
- # singleuser-server --> depends, but the default is everything
- {{- . | toYaml | nindent 4 }}
+ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.singleuser.networkPolicy)) }}
+ {{- . | nindent 4 }}
{{- end }}
{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/singleuser/secret.yaml b/applications/jupyterhub/deploy/templates/singleuser/secret.yaml
new file mode 100644
index 00000000..e6eab9bd
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/singleuser/secret.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.apps.jupyterhub.singleuser.extraFiles }}
+kind: Secret
+apiVersion: v1
+metadata:
+ name: {{ include "jupyterhub.singleuser.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+type: Opaque
+{{- with include "jupyterhub.extraFiles.data" .Values.apps.jupyterhub.singleuser.extraFiles }}
+data:
+ {{- . | nindent 2 }}
+{{- end }}
+{{- with include "jupyterhub.extraFiles.stringData" .Values.apps.jupyterhub.singleuser.extraFiles }}
+stringData:
+ {{- . | nindent 2 }}
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/values.schema.yaml b/applications/jupyterhub/deploy/values.schema.yaml
new file mode 100644
index 00000000..69c13a83
--- /dev/null
+++ b/applications/jupyterhub/deploy/values.schema.yaml
@@ -0,0 +1,3014 @@
+# This schema (a jsonschema in YAML format) is used to generate
+# values.schema.json which is packaged with the Helm chart for client side
+# validation by helm of values before template rendering.
+#
+# This schema is also used by our documentation system to build the
+# configuration reference section based on the description fields. See
+# docs/source/conf.py for that logic!
+#
+# We look to document everything we have default values for in values.yaml, but
+# we don't look to enforce the perfect validation logic within this file.
+#
+# ref: https://json-schema.org/learn/getting-started-step-by-step.html
+#
+$schema: http://json-schema.org/draft-07/schema#
+type: object
+additionalProperties: false
+required:
+ - imagePullSecrets
+ - hub
+ - proxy
+ - singleuser
+ - ingress
+ - prePuller
+ - custom
+ - cull
+ - debug
+ - rbac
+ - global
+properties:
+ enabled:
+ type: [boolean, "null"]
+ description: |
+      `enabled` is ignored by the jupyterhub chart itself, but a chart
+      depending on the jupyterhub chart can conditionally make use of this
+      config option as the condition.
+ fullnameOverride:
+ type: [string, "null"]
+ description: |
+      fullnameOverride and nameOverride allow you to adjust how the resources
+      that are part of the Helm chart are named.
+
+ Name format | Resource types | fullnameOverride | nameOverride | Note
+ ------------------------- | -------------- | ---------------- | ------------ | -
+ component | namespaced | `""` | * | Default
+ release-component | cluster wide | `""` | * | Default
+ fullname-component | * | str | * | -
+ release-component | * | null | `""` | -
+ release-(name-)component | * | null | str | omitted if contained in release
+ release-(chart-)component | * | null | null | omitted if contained in release
+
+ ```{admonition} Warning!
+ :class: warning
+ Changing fullnameOverride or nameOverride after the initial installation
+ of the chart isn't supported. Changing their values likely leads to a
+ reset of non-external JupyterHub databases, abandonment of users' storage,
+ and severed couplings to currently running user pods.
+ ```
+
+ If you are a developer of a chart depending on this chart, you should
+ avoid hardcoding names. If you want to reference the name of a resource in
+ this chart from a parent helm chart's template, you can make use of the
+ global named templates instead.
+
+ ```yaml
+ # some pod definition of a parent chart helm template
+ schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
+ ```
+
+ To access them from a container, you can also rely on the hub ConfigMap
+ that contains entries of all the resource names.
+
+ ```yaml
+ # some container definition in a parent chart helm template
+ env:
+ - name: SCHEDULER_NAME
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "jupyterhub.user-scheduler.fullname" . }}
+ key: user-scheduler
+ ```
+
+ nameOverride:
+ type: [string, "null"]
+ description: |
+ See the documentation under [`fullnameOverride`](schema_fullnameOverride).
+
+ imagePullSecret:
+ type: object
+ required: [create]
+ if:
+ properties:
+ create:
+ const: true
+ then:
+ additionalProperties: false
+ required: [registry, username, password]
+ description: |
+ This is configuration to create a k8s Secret resource of `type:
+ kubernetes.io/dockerconfigjson`, with credentials to pull images from a
+ private image registry. If you opt to do so, it will be available for use
+ by all pods in their respective `spec.imagePullSecrets` alongside other
+ k8s Secrets defined in `imagePullSecrets` or the pod respective
+ `...image.pullSecrets` configuration.
+
+ In other words, using this configuration option can automate both the
+ otherwise manual creation of a k8s Secret and the otherwise manual
+ configuration to reference this k8s Secret in all the pods of the Helm
+ chart.
+
+ ```sh
+ # you won't need to create a k8s Secret manually...
+ kubectl create secret docker-registry image-pull-secret \
+        --docker-server=<your-registry-server> \
+        --docker-username=<your-username> \
+        --docker-email=<your-email> \
+        --docker-password=<your-password>
+ ```
+
+ If you just want to let all Pods reference an existing secret, use the
+ [`imagePullSecrets`](schema_imagePullSecrets) configuration instead.
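+
+      As a minimal sketch (the registry, username, and password below are
+      hypothetical placeholders):
+
+      ```yaml
+      imagePullSecret:
+        create: true
+        registry: quay.io
+        username: my-user
+        password: my-registry-token
+      ```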
+ properties:
+ create:
+ type: boolean
+ description: |
+ Toggle the creation of the k8s Secret with provided credentials to
+ access a private image registry.
+ automaticReferenceInjection:
+ type: boolean
+ description: |
+ Toggle the automatic reference injection of the created Secret to all
+ pods' `spec.imagePullSecrets` configuration.
+ registry:
+ type: string
+ description: |
+ Name of the private registry you want to create a credential set for.
+ It will default to Docker Hub's image registry.
+
+ Examples:
+ - https://index.docker.io/v1/
+ - quay.io
+ - eu.gcr.io
+ - alexmorreale.privatereg.net
+ username:
+ type: string
+ description: |
+ Name of the user you want to use to connect to your private registry.
+
+ For external gcr.io, you will use the `_json_key`.
+
+ Examples:
+ - alexmorreale
+ - alex@pfc.com
+ - _json_key
+ password:
+ type: string
+ description: |
+ Password for the private image registry's user.
+
+ Examples:
+ - plaintextpassword
+ - abc123SECRETzyx098
+
+          For gcr.io registries, the password will be a big JSON blob for a
+          Google cloud service account; it should look something like below.
+
+ ```yaml
+ password: |-
+ {
+ "type": "service_account",
+ "project_id": "jupyter-se",
+ "private_key_id": "f2ba09118a8d3123b3321bd9a7d6d0d9dc6fdb85",
+ ...
+ }
+ ```
+ email:
+ type: [string, "null"]
+ description: |
+ Specification of an email is most often not required, but it is
+ supported.
+
+ imagePullSecrets:
+ type: array
+ description: |
+      Chart wide configuration to _append_ k8s Secret references to all its
+      pods' `spec.imagePullSecrets` configuration.
+
+ This will not override or get overridden by pod specific configuration,
+ but instead augment the pod specific configuration.
+
+ You can use both the k8s native syntax, where each list element is like
+ `{"name": "my-secret-name"}`, or you can let list elements be strings
+ naming the secrets directly.
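+
+      As a sketch with a hypothetical secret name, both entries below
+      reference the same existing k8s Secret:
+
+      ```yaml
+      imagePullSecrets:
+        - name: my-image-pull-secret
+        - my-image-pull-secret
+      ```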
+
+ hub:
+ type: object
+ additionalProperties: false
+ required: [baseUrl]
+ properties:
+ revisionHistoryLimit: &revisionHistoryLimit
+ type: [integer, "null"]
+ minimum: 0
+ description: |
+ Configures the resource's `spec.revisionHistoryLimit`. This is
+ available for Deployment, StatefulSet, and DaemonSet resources.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#revision-history-limit)
+ for more info.
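+
+          For example, a minimal sketch that caps the hub Deployment's rollout
+          history at four revisions:
+
+          ```yaml
+          hub:
+            revisionHistoryLimit: 4
+          ```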
+ config:
+ type: object
+ additionalProperties: true
+ description: |
+          JupyterHub and its components (authenticators, spawners, etc.) are
+          Python classes that expose their configuration through
+          [_traitlets_](https://traitlets.readthedocs.io/en/stable/). With this
+ Helm chart configuration (`hub.config`), you can directly configure
+ the Python classes through _static_ YAML values. To _dynamically_ set
+ values, you need to use [`hub.extraConfig`](schema_hub.extraConfig)
+ instead.
+
+ ```{admonition} Currently intended only for auth config
+ :class: warning
+          This config _currently_ (0.11.0) only influences the software in the
+          `hub` Pod, but some Helm chart config options such as
+          [`hub.baseUrl`](schema_hub.baseUrl) are used to set
+          `JupyterHub.base_url` in the `hub` Pod _and_ influence how other Helm
+          templates are rendered.
+
+ As we have not yet mapped out all the potential configuration
+ conflicts except for the authentication related configuration options,
+ please accept that using it for something else at this point can lead
+ to issues.
+ ```
+
+ __Example__
+
+          If documentation or a sample `jupyterhub_config.py` contains the
+          following section:
+
+ ```python
+ c.JupyterHub.admin_access = true
+ c.JupyterHub.admin_users = ["jovyan1", "jovyan2"]
+ c.KubeSpawner.k8s_api_request_timeout = 10
+ c.GitHubOAuthenticator.allowed_organizations = ["jupyterhub"]
+ ```
+
+ Then, you would be able to represent it with this configuration like:
+
+ ```yaml
+ hub:
+ config:
+ JupyterHub:
+ admin_access: true
+ admin_users:
+ - jovyan1
+ - jovyan2
+ KubeSpawner:
+ k8s_api_request_timeout: 10
+ GitHubOAuthenticator:
+ allowed_organizations:
+ - jupyterhub
+ ```
+
+ ```{admonition} YAML limitations
+ :class: tip
+ You can't represent Python `Bytes` or `Set` objects in YAML directly.
+ ```
+
+ ```{admonition} Helm value merging
+ :class: tip
+ `helm` merges a Helm chart's default values with values passed with
+ the `--values` or `-f` flag. During merging, lists are replaced while
+ dictionaries are updated.
+ ```
+ extraFiles: &extraFiles
+ type: object
+ additionalProperties: false
+ description: |
+ A dictionary with extra files to be injected into the pod's container
+          on startup. This can for example be used to inject configuration
+          files, custom user interface templates, images, and more.
+
+ ```yaml
+ # NOTE: "hub" is used in this example, but the configuration is the
+ # same for "singleuser".
+ hub:
+ extraFiles:
+ # The file key is just a reference that doesn't influence the
+ # actual file name.
+              <file key>:
+ # mountPath is required and must be the absolute file path.
+                mountPath: <full file path>
+
+ # Choose one out of the three ways to represent the actual file
+ # content: data, stringData, or binaryData.
+ #
+ # data should be set to a mapping (dictionary). It will in the
+ # end be rendered to either YAML, JSON, or TOML based on the
+ # filename extension that are required to be either .yaml, .yml,
+ # .json, or .toml.
+ #
+ # If your content is YAML, JSON, or TOML, it can make sense to
+ # use data to represent it over stringData as data can be merged
+ # instead of replaced if set partially from separate Helm
+ # configuration files.
+ #
+ # Both stringData and binaryData should be set to a string
+ # representing the content, where binaryData should be the
+ # base64 encoding of the actual file content.
+ #
+ data:
+ myConfig:
+ myMap:
+ number: 123
+ string: "hi"
+ myList:
+ - 1
+ - 2
+ stringData: |
+ hello world!
+ binaryData: aGVsbG8gd29ybGQhCg==
+
+ # mode is by default 0644 and you can optionally override it
+ # either by octal notation (example: 0400) or decimal notation
+ # (example: 256).
+                mode: <file mode>
+ ```
+
+ **Using --set-file**
+
+ To avoid embedding entire files in the Helm chart configuration, you
+ can use the `--set-file` flag during `helm upgrade` to set the
+ stringData or binaryData field.
+
+ ```yaml
+ hub:
+ extraFiles:
+ my_image:
+ mountPath: /usr/local/share/jupyterhub/static/my_image.png
+
+ # Files in /usr/local/etc/jupyterhub/jupyterhub_config.d are
+ # automatically loaded in alphabetical order of the final file
+ # name when JupyterHub starts.
+ my_config:
+ mountPath: /usr/local/etc/jupyterhub/jupyterhub_config.d/my_jupyterhub_config.py
+ ```
+
+ ```bash
+ # --set-file expects a text based file, so you need to base64 encode
+ # it manually first.
+ base64 my_image.png > my_image.png.b64
+
+ helm upgrade <...> \
+ --set-file hub.extraFiles.my_image.binaryData=./my_image.png.b64 \
+ --set-file hub.extraFiles.my_config.stringData=./my_jupyterhub_config.py
+ ```
+
+ **Common uses**
+
+ 1. **JupyterHub template customization**
+
+ You can replace the default JupyterHub user interface templates in
+ the hub pod by injecting new ones to
+ `/usr/local/share/jupyterhub/templates`. These can in turn
+ reference custom images injected to
+ `/usr/local/share/jupyterhub/static`.
+
+ 1. **JupyterHub standalone file config**
+
+ Instead of embedding JupyterHub python configuration as a string
+ within a YAML file through
+ [`hub.extraConfig`](schema_hub.extraConfig), you can inject a
+ standalone .py file into
+ `/usr/local/etc/jupyterhub/jupyterhub_config.d` that is
+ automatically loaded.
+
+ 1. **Flexible configuration**
+
+ By injecting files, you don't have to embed them in a docker image
+ that you have to rebuild.
+
+             If your configuration file is a YAML/JSON/TOML file, you can also
+             use `data` instead of `stringData`, which allows you to set
+             various configuration in separate Helm config files. This can be
+             useful to help dependent charts override only part of the file's
+             configuration, or to allow the configuration to be set through
+             multiple Helm configuration files.
+
+ **Limitations**
+
+ 1. File size
+
+ The files in `hub.extraFiles` and `singleuser.extraFiles` are
+ respectively stored in their own k8s Secret resource. As k8s
+             Secrets are typically limited to 1MB, you will be limited to a
+             total file size somewhat below that, as the base64 encoding of
+             file content reduces the available capacity to 75%.
+
+ 2. File updates
+
+ The files that are mounted are only set during container startup.
+ This is [because we use
+ `subPath`](https://kubernetes.io/docs/concepts/storage/volumes/#secret)
+ as is required to avoid replacing the content of the entire
+ directory we mount in.
+ patternProperties:
+ ".*":
+ type: object
+ additionalProperties: false
+ required: [mountPath]
+ oneOf:
+ - required: [data]
+ - required: [stringData]
+ - required: [binaryData]
+ properties:
+ mountPath:
+ type: string
+ data:
+ type: object
+ additionalProperties: true
+ stringData:
+ type: string
+ binaryData:
+ type: string
+ mode:
+ type: number
+ baseUrl:
+ type: string
+ description: |
+ This is the equivalent of c.JupyterHub.base_url, but it is also needed
+ by the Helm chart in general. So, instead of setting
+ c.JupyterHub.base_url, use this configuration.
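+
+          For example, a sketch serving JupyterHub under a hypothetical
+          /jupyter URL prefix:
+
+          ```yaml
+          hub:
+            baseUrl: /jupyter
+          ```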
+ command:
+ type: array
+ description: |
+ A list of strings to be used to replace the JupyterHub image's
+ `ENTRYPOINT` entry. Note that in k8s lingo, the Dockerfile's
+ `ENTRYPOINT` is called `command`. The list of strings will be expanded
+ with Helm's template function `tpl` which can render Helm template
+ logic inside curly braces (`{{... }}`).
+
+ This could be useful to wrap the invocation of JupyterHub itself in
+ some custom way.
+
+ For more details, see the [Kubernetes
+ documentation](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/).
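+
+          As a sketch, assuming a hypothetical wrapper binary baked into a
+          custom hub image:
+
+          ```yaml
+          hub:
+            command:
+              # hypothetical wrapper, not part of the default hub image
+              - /usr/local/bin/my-wrapper
+              - jupyterhub
+          ```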
+ args:
+ type: array
+ description: |
+ A list of strings to be used to replace the JupyterHub image's `CMD`
+ entry as well as the Helm chart's default way to start JupyterHub.
+ Note that in k8s lingo, the Dockerfile's `CMD` is called `args`. The
+ list of strings will be expanded with Helm's template function `tpl`
+ which can render Helm template logic inside curly braces (`{{... }}`).
+
+ ```{warning}
+ By replacing the entire configuration file, which is mounted to
+ `/usr/local/etc/jupyterhub/jupyterhub_config.py` by the Helm chart,
+ instead of appending to it with `hub.extraConfig`, you expose your
+ deployment for issues stemming from getting out of sync with the Helm
+ chart's config file.
+
+          These kinds of issues will be significantly harder to debug and
+          diagnose, and can therefore cost a lot of time for both the community
+          maintaining the Helm chart and yourself, even if the replaced config
+          file wasn't the cause of the issue.
+
+          Due to this, we ask that you do your _absolute best_ to avoid
+          replacing the default provided `jupyterhub_config.py` file. It is
+          often possible. For example, if your goal is to have a dedicated .py
+          file for more extensive additions that you can syntax highlight, and
+          you feel limited by passing code in `hub.extraConfig` as part of a
+          YAML file, you can use [this
+          trick](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues/1580#issuecomment-707776237)
+          instead.
+ ```
+
+ ```yaml
+ hub:
+ args:
+ - "jupyterhub"
+ - "--config"
+ - "/usr/local/etc/jupyterhub/jupyterhub_config.py"
+ - "--debug"
+ - "--upgrade-db"
+ ```
+
+ For more details, see the [Kubernetes
+ documentation](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/).
+ cookieSecret:
+ type: [string, "null"]
+ description: |
+ ```{note}
+ As of version 1.0.0 this will automatically be generated and there is
+ no need to set it manually.
+
+ If you wish to reset a generated key, you can use `kubectl edit` on
+ the k8s Secret typically named `hub` and remove the
+ `hub.config.JupyterHub.cookie_secret` entry in the k8s Secret, then
+ perform a new `helm upgrade`.
+ ```
+
+ A 32-byte cryptographically secure randomly generated string used to sign values of
+ secure cookies set by the hub. If unset, jupyterhub will generate one on startup and
+ save it in the file `jupyterhub_cookie_secret` in the `/srv/jupyterhub` directory of
+ the hub container. A value set here will make JupyterHub overwrite any previous file.
+
+ You do not need to set this at all if you are using the default configuration for
+ storing databases - sqlite on a persistent volume (with `hub.db.type` set to the
+ default `sqlite-pvc`). If you are using an external database, then you must set this
+ value explicitly - or your users will keep getting logged out each time the hub pod
+ restarts.
+
+          Changing this value will cause all user logins to be invalidated. If
+          this secret leaks, *immediately* change it to something else, or user
+          data can be compromised.
+
+ ```sh
+ # to generate a value, run
+ openssl rand -hex 32
+ ```
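+
+          A sketch of setting a generated value explicitly (the value below is
+          a hypothetical placeholder, not a secret to reuse):
+
+          ```yaml
+          hub:
+            cookieSecret: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+          ```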
+ image: &image-spec
+ type: object
+ additionalProperties: false
+ required: [name, tag]
+ description: |
+ Set custom image name, tag, pullPolicy, or pullSecrets for the pod.
+ properties:
+ name:
+ type: string
+ description: |
+ The name of the image, without the tag.
+
+ ```
+ # example name
+ gcr.io/my-project/my-image
+ ```
+ tag:
+ type: string
+ description: |
+ The tag of the image to pull. This is the value following `:` in
+ complete image specifications.
+
+ ```
+ # example tags
+ v1.11.1
+ zhy270a
+ ```
+ pullPolicy:
+ enum: [null, "", IfNotPresent, Always, Never]
+ description: |
+ Configures the Pod's `spec.imagePullPolicy`.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)
+ for more info.
+ pullSecrets:
+ type: array
+ description: |
+ A list of references to existing Kubernetes Secrets with
+ credentials to pull the image.
+
+ This Pod's final `imagePullSecrets` k8s specification will be a
+ combination of:
+
+ 1. This list of k8s Secrets, specific for this pod.
+ 2. The list of k8s Secrets, for use by all pods in the Helm chart,
+ declared in this Helm charts configuration called
+ `imagePullSecrets`.
+ 3. A k8s Secret, for use by all pods in the Helm chart, if
+ conditionally created from image registry credentials provided
+ under `imagePullSecret` if `imagePullSecret.create` is set to
+ true.
+
+ ```yaml
+ # example - k8s native syntax
+ pullSecrets:
+ - name: my-k8s-secret-with-image-registry-credentials
+
+ # example - simplified syntax
+ pullSecrets:
+ - my-k8s-secret-with-image-registry-credentials
+ ```
+ networkPolicy: &networkPolicy-spec
+ type: object
+ additionalProperties: false
+ description: |
+ This configuration regards the creation and configuration of a k8s
+ _NetworkPolicy resource_.
+ properties:
+ enabled:
+ type: boolean
+ description: |
+              Toggle the creation of the NetworkPolicy resource targeting this
+              pod, and by doing so restrict its communication to only what is
+              explicitly allowed in the NetworkPolicy.
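+
+              For example, a sketch that opts the hub pod out of NetworkPolicy
+              restrictions entirely:
+
+              ```yaml
+              hub:
+                networkPolicy:
+                  enabled: false
+              ```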
+ ingress:
+ type: array
+ description: |
+ Additional ingress rules to add besides those that are required
+ for core functionality.
+ egress:
+ type: array
+ description: |
+ Additional egress rules to add besides those that are required for
+ core functionality and those added via
+ [`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules).
+
+ ```{versionchanged} 2.0.0
+ The default value changed from providing one very permissive rule
+ allowing all egress to providing no rule. The permissive rule is
+ still provided via
+ [`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules)
+ set to true though.
+ ```
+
+ As an example, below is a configuration that disables the more
+ broadly permissive `.privateIPs` egress allow rule for the hub
+ pod, and instead provides tightly scoped permissions to access a
+ specific k8s local service as identified by pod labels.
+
+ ```yaml
+ hub:
+ networkPolicy:
+ egressAllowRules:
+ privateIPs: false
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ app: my-k8s-local-service
+ ports:
+ - protocol: TCP
+ port: 5978
+ ```
+ egressAllowRules:
+ type: object
+ additionalProperties: false
+ description: |
+ This is a set of predefined rules that when enabled will be added
+ to the NetworkPolicy list of egress rules.
+
+ The resulting egress rules will be a composition of:
+ - rules specific for the respective pod(s) function within the
+ Helm chart
+ - rules based on enabled `egressAllowRules` flags
+ - rules explicitly specified by the user
+
+ ```{note}
+ Each flag under this configuration will not render into a
+              dedicated rule in the NetworkPolicy resource, but will instead be
+              combined with the other flags into a reduced set of rules to avoid a
+ performance penalty.
+ ```
+
+ ```{versionadded} 2.0.0
+ ```
+ properties:
+ cloudMetadataServer:
+ type: boolean
+ description: |
+ Defaults to `false` for singleuser servers, but to `true` for
+ all other network policies.
+
+ When enabled this rule allows the respective pod(s) to
+ establish outbound connections to the cloud metadata server.
+
+                  Note that the `nonPrivateIPs` rule allows all non-private
+                  IP ranges but makes an exception for the cloud metadata
+                  server, leaving this as the definitive configuration for
+                  allowing access to the cloud metadata server.
+
+ ```{versionchanged} 3.0.0
+                  This configuration is not allowed to be set to true at the
+ same time as
+ [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)
+ to avoid an ambiguous configuration.
+ ```
+ dnsPortsCloudMetadataServer:
+ type: boolean
+ description: |
+ Defaults to `true` for all network policies.
+
+ When enabled this rule allows the respective pod(s) to
+ establish outbound connections to the cloud metadata server
+ via port 53.
+
+ Relying on this rule for the singleuser config should go hand
+ in hand with disabling
+ [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)
+ to avoid an ambiguous configuration.
+
+ Known situations when this rule can be relevant:
+
+ - In GKE clusters with Cloud DNS that is reached at the
+ cloud metadata server's non-private IP.
+
+ ```{note}
+ This chart doesn't know how to identify the DNS server that
+ pods will rely on due to variations between how k8s clusters
+                  have been set up. Due to that, multiple rules are enabled by
+ default to ensure DNS connectivity.
+ ```
+
+ ```{versionadded} 3.0.0
+ ```
+ dnsPortsKubeSystemNamespace:
+ type: boolean
+ description: |
+ Defaults to `true` for all network policies.
+
+ When enabled this rule allows the respective pod(s) to
+ establish outbound connections to pods in the kube-system
+ namespace via port 53.
+
+ Known situations when this rule can be relevant:
+
+ - GKE, EKS, AKS, and other clusters relying directly on
+ `kube-dns` or `coredns` pods in the `kube-system` namespace.
+
+ ```{note}
+ This chart doesn't know how to identify the DNS server that
+ pods will rely on due to variations between how k8s clusters
+ have been setup. Due to that, multiple rules are enabled by
+ default to ensure DNS connectivity.
+ ```
+
+ ```{versionadded} 3.0.0
+ ```
+ dnsPortsPrivateIPs:
+ type: boolean
+ description: |
+ Defaults to `true` for all network policies.
+
+ When enabled this rule allows the respective pod(s) to
+ establish outbound connections to private IPs via port 53.
+
+ Known situations when this rule can be relevant:
+
+          - GKE clusters relying on a DNS server indirectly via a
+            node-local DNS cache at an unknown private IP.
+
+ ```{note}
+ This chart doesn't know how to identify the DNS server that
+ pods will rely on due to variations between how k8s clusters
+ have been setup. Due to that, multiple rules are enabled by
+ default to ensure DNS connectivity.
+          ```
+
+ ```{warning}
+ This rule is not expected to work in clusters relying on
+ Cilium to enforce the NetworkPolicy rules (includes GKE
+ clusters with Dataplane v2), this is due to a [known
+ limitation](https://github.com/cilium/cilium/issues/9209).
+ ```
+ nonPrivateIPs:
+ type: boolean
+ description: |
+ Defaults to `true` for all network policies.
+
+ When enabled this rule allows the respective pod(s) to
+ establish outbound connections to the non-private IP ranges
+ with the exception of the cloud metadata server. This means
+ respective pod(s) can establish connections to the internet
+ but not (say) an unsecured prometheus server running in the
+ same cluster.
+ privateIPs:
+ type: boolean
+ description: |
+ Defaults to `false` for singleuser servers, but to `true` for
+ all other network policies.
+
+ Private IPs refer to the IP ranges `10.0.0.0/8`,
+ `172.16.0.0/12`, `192.168.0.0/16`.
+
+              When enabled this rule allows the respective pod(s) to
+              establish outbound connections to private IPs, which typically
+              include workloads internal to the k8s cluster, such as (say)
+              an unsecured prometheus server running in the same cluster.
+
+ Since not all workloads in the k8s cluster may have
+ NetworkPolicies setup to restrict their incoming connections,
+ having this set to false can be a good defense against
+ malicious intent from someone in control of software in these
+ pods.
+
+              If possible, try to avoid setting this to true as it gives
+              broad permissions that could be specified more directly via
+              [`.egress`](schema_singleuser.networkPolicy.egress).
+
+ ```{warning}
+ This rule is not expected to work in clusters relying on
+ Cilium to enforce the NetworkPolicy rules (includes GKE
+ clusters with Dataplane v2), this is due to a [known
+ limitation](https://github.com/cilium/cilium/issues/9209).
+ ```
+ interNamespaceAccessLabels:
+ enum: [accept, ignore]
+ description: |
+          This configuration option determines whether namespaces and pods
+          in other namespaces that carry specific access labels should be
+          accepted to allow ingress (set to `accept`), or whether those
+          labels are to be ignored when applied outside the local namespace
+          (set to `ignore`).
+
+ The available access labels for respective NetworkPolicy resources
+ are:
+
+ - `hub.jupyter.org/network-access-hub: "true"` (hub)
+ - `hub.jupyter.org/network-access-proxy-http: "true"` (proxy.chp, proxy.traefik)
+ - `hub.jupyter.org/network-access-proxy-api: "true"` (proxy.chp)
+ - `hub.jupyter.org/network-access-singleuser: "true"` (singleuser)
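+
+          As an example, the following sketch (the client pod and its
+          labeling are illustrative) accepts access labels from other
+          namespaces for the hub:
+
+          ```yaml
+          hub:
+            networkPolicy:
+              interNamespaceAccessLabels: accept
+          ```
+
+          A pod in another namespace labeled with
+          `hub.jupyter.org/network-access-hub: "true"` would then be
+          allowed ingress to the hub pod.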
+ allowedIngressPorts:
+ type: array
+ description: |
+ A rule to allow ingress on these ports will be added no matter
+ what the origin of the request is. The default setting for
+ `proxy.chp` and `proxy.traefik`'s networkPolicy configuration is
+ `[http, https]`, while it is `[]` for other networkPolicies.
+
+ Note that these port names or numbers target a Pod's port name or
+ number, not a k8s Service's port name or number.
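+
+          As an example, a sketch that also allows ingress to the chp proxy
+          pod on an extra port (the port number is illustrative):
+
+          ```yaml
+          proxy:
+            chp:
+              networkPolicy:
+                allowedIngressPorts: [http, https, 9090]
+          ```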
+ db:
+ type: object
+ additionalProperties: false
+ properties:
+ type:
+ enum: [sqlite-pvc, sqlite-memory, mysql, postgres, other]
+ description: |
+ Type of database backend to use for the hub database.
+
+ The Hub requires a persistent database to function, and this lets you specify
+ where it should be stored.
+
+ The various options are:
+
+ 1. **sqlite-pvc**
+
+ Use an `sqlite` database kept on a persistent volume attached to the hub.
+
+ By default, this disk is created by the cloud provider using
+ *dynamic provisioning* configured by a [storage
+ class](https://kubernetes.io/docs/concepts/storage/storage-classes/).
+ You can customize how this disk is created / attached by
+ setting various properties under `hub.db.pvc`.
+
+ This is the default setting, and should work well for most cloud provider
+ deployments.
+
+ 2. **sqlite-memory**
+
+ Use an in-memory `sqlite` database. This should only be used for testing,
+ since the database is erased whenever the hub pod restarts - causing the hub
+ to lose all memory of users who had logged in before.
+
+ When using this for testing, make sure you delete all other objects that the
+ hub has created (such as user pods, user PVCs, etc) every time the hub restarts.
+ Otherwise you might run into errors about duplicate resources.
+
+ 3. **mysql**
+
+ Use an externally hosted mysql database.
+
+ You have to specify an sqlalchemy connection string for the mysql database you
+ want to connect to in `hub.db.url` if using this option.
+
+ The general format of the connection string is:
+ ```
+             mysql+pymysql://<db-username>:<db-password>@<db-hostname>:<db-port>/<db-name>
+ ```
+
+ The user specified in the connection string must have the rights to create
+ tables in the database specified.
+
+ 4. **postgres**
+
+ Use an externally hosted postgres database.
+
+ You have to specify an sqlalchemy connection string for the postgres database you
+ want to connect to in `hub.db.url` if using this option.
+
+ The general format of the connection string is:
+ ```
+             postgresql+psycopg2://<db-username>:<db-password>@<db-hostname>:<db-port>/<db-name>
+ ```
+
+ The user specified in the connection string must have the rights to create
+ tables in the database specified.
+
+ 5. **other**
+
+ Use an externally hosted database of some kind other than mysql
+ or postgres.
+
+ When using _other_, the database password must be passed as
+ part of [hub.db.url](schema_hub.db.url) as
+ [hub.db.password](schema_hub.db.password) will be ignored.
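+
+          As an example, a sketch of pointing the hub to an externally
+          hosted postgres database, where the bracketed values are
+          placeholders to fill in:
+
+          ```yaml
+          hub:
+            db:
+              type: postgres
+              url: postgresql+psycopg2://<db-username>:<db-password>@<db-hostname>:<db-port>/<db-name>
+          ```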
+ pvc:
+ type: object
+ additionalProperties: false
+ required: [storage]
+ description: |
+ Customize the Persistent Volume Claim used when `hub.db.type` is `sqlite-pvc`.
+ properties:
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: &labels-and-annotations-patternProperties
+ ".*":
+ type: string
+ description: |
+ Annotations to apply to the PVC containing the sqlite database.
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+ for more details about annotations.
+ selector:
+ type: object
+ additionalProperties: true
+ description: |
+ Label selectors to set for the PVC containing the sqlite database.
+
+ Useful when you are using a specific PV, and want to bind to
+ that and only that.
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
+ for more details about using a label selector for what PV to
+ bind to.
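+
+              As an example, a sketch binding only to PVs carrying a
+              hypothetical `content: jupyter` label:
+
+              ```yaml
+              hub:
+                db:
+                  pvc:
+                    selector:
+                      matchLabels:
+                        content: jupyter
+              ```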
+ storage:
+ type: string
+ description: |
+ Size of disk to request for the database disk.
+ accessModes:
+ type: array
+ items:
+ type: [string, "null"]
+ description: |
+ AccessModes contains the desired access modes the volume
+ should have. See [the k8s
+ documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1)
+ for more information.
+ storageClassName:
+ type: [string, "null"]
+ description: |
+ Name of the StorageClass required by the claim.
+
+              If this is a blank string, the PVC's `storageClassName` will
+              be set to a blank string (disabling dynamic provisioning),
+              while if it is null, the field will not be set at all
+              (selecting the cluster's default StorageClass).
+ subPath:
+ type: [string, "null"]
+ description: |
+ Path within the volume from which the container's volume
+ should be mounted. Defaults to "" (volume's root).
+ upgrade:
+ type: [boolean, "null"]
+ description: |
+          Users with external databases need to opt in to upgrades of the
+          JupyterHub-specific database schema, if needed, as part of a
+          JupyterHub version upgrade.
+ url:
+ type: [string, "null"]
+ description: |
+ Connection string when `hub.db.type` is mysql or postgres.
+
+ See documentation for `hub.db.type` for more details on the format of this property.
+ password:
+ type: [string, "null"]
+ description: |
+ Password for the database when `hub.db.type` is mysql or postgres.
+ labels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra labels to add to the hub pod.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ to learn more about labels.
+ initContainers:
+ type: array
+ description: |
+        List of initContainers to be run in the hub pod. See the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/).
+
+ ```yaml
+ hub:
+ initContainers:
+ - name: init-myservice
+ image: busybox:1.28
+ command: ['sh', '-c', 'command1']
+ - name: init-mydb
+ image: busybox:1.28
+ command: ['sh', '-c', 'command2']
+ ```
+ extraEnv:
+ type: [object, array]
+ additionalProperties: true
+ description: |
+ Extra environment variables that should be set for the hub pod.
+
+ Environment variables are usually used to:
+ - Pass parameters to some custom code in `hub.extraConfig`.
+ - Configure code running in the hub pod, such as an authenticator or
+ spawner.
+
+ String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which
+ is a part of Kubernetes.
+
+ ```yaml
+ hub:
+ extraEnv:
+ # basic notation (for literal values only)
+ MY_ENV_VARS_NAME1: "my env var value 1"
+
+ # explicit notation (the "name" field takes precedence)
+ HUB_NAMESPACE:
+ name: HUB_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+
+ # implicit notation (the "name" field is implied)
+ PREFIXED_HUB_NAMESPACE:
+ value: "my-prefix-$(HUB_NAMESPACE)"
+ SECRET_VALUE:
+ valueFrom:
+ secretKeyRef:
+ name: my-k8s-secret
+ key: password
+ ```
+
+ For more information, see the [Kubernetes EnvVar
+ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
+ extraConfig:
+ type: object
+ additionalProperties: true
+ description: |
+ Arbitrary extra python based configuration that should be in `jupyterhub_config.py`.
+
+ This is the *escape hatch* - if you want to configure JupyterHub to do something specific
+ that is not present here as an option, you can write the raw Python to do it here.
+
+ extraConfig is a *dict*, so there can be multiple configuration
+ snippets under different names. The configuration sections are run in
+ alphabetical order based on the keys.
+
+ Non-exhaustive examples of things you can do here:
+ - Subclass authenticator / spawner to do a custom thing
+        - Dynamically launch different images for different sets of users
+ - Inject an auth token from GitHub authenticator into user pod
+ - Anything else you can think of!
+
+ Since this is usually a multi-line string, you want to format it using YAML's
+ [| operator](https://yaml.org/spec/1.2.2/#23-scalars).
+
+ For example:
+
+ ```yaml
+ hub:
+ extraConfig:
+ myConfig.py: |
+ c.JupyterHub.something = 'something'
+ c.Spawner.something_else = 'something else'
+ ```
+
+ ```{note}
+ No code validation is performed until JupyterHub loads it! If you make
+ a typo here, it will probably manifest itself as the hub pod failing
+ to start up and instead entering an `Error` state or the subsequent
+ `CrashLoopBackoff` state.
+
+          To make use of your own linters and other tooling, it can be useful
+          not to embed Python code inside a YAML file. To do that, consider using
+ [`hub.extraFiles`](schema_hub.extraFiles) and mounting a file to
+ `/usr/local/etc/jupyterhub/jupyterhub_config.d` in order to load your
+ extra configuration logic.
+ ```
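+
+          As a sketch of that approach (the file name and mount path are
+          illustrative, using `hub.extraFiles` entries with `mountPath` and
+          `stringData`):
+
+          ```yaml
+          hub:
+            extraFiles:
+              my-config:
+                mountPath: /usr/local/etc/jupyterhub/jupyterhub_config.d/my-config.py
+                stringData: |
+                  c.JupyterHub.something = "something"
+          ```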
+
+ fsGid:
+ type: [integer, "null"]
+ minimum: 0
+ # This schema entry is needed to help us print a more helpful error
+ # message in NOTES.txt if hub.fsGid is set.
+ #
+ description: |
+ ```{note}
+ Removed in version 2.0.0. Use
+ [`hub.podSecurityContext`](schema_hub.podSecurityContext) and specify
+ `fsGroup` instead.
+ ```
+ service:
+ type: object
+ additionalProperties: false
+ description: |
+        Object to configure the Kubernetes Service through which JupyterHub will be exposed.
+ properties:
+ type:
+ enum: [ClusterIP, NodePort, LoadBalancer, ExternalName]
+ description: |
+ The Kubernetes ServiceType to be used.
+
+ The default type is `ClusterIP`.
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)
+ to learn more about service types.
+ ports:
+ type: object
+ additionalProperties: false
+ description: |
+ Object to configure the ports the hub service will be deployed on.
+ properties:
+ nodePort:
+ type: [integer, "null"]
+ minimum: 0
+ description: |
+ The nodePort to deploy the hub service on.
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Kubernetes annotations to apply to the hub service.
+ extraPorts:
+ type: array
+ description: |
+ Extra ports to add to the Hub Service object besides `hub` / `8081`.
+            This should be an array of port objects, each including `name`, `port`, and `targetPort`.
+ See [Multi-port Services](https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services) for more details.
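+
+            As an example, a sketch exposing a hypothetical extra port on
+            the hub Service:
+
+            ```yaml
+            hub:
+              service:
+                extraPorts:
+                  - name: my-extra-port
+                    port: 9000
+                    targetPort: 9000
+            ```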
+ loadBalancerIP:
+ type: [string, "null"]
+ description: |
+            A public IP address the hub Kubernetes service should be exposed
+            on. Exposing the hub directly is not recommended; instead, route
+            traffic through the proxy-public service towards the hub.
+
+ pdb: &pdb-spec
+ type: object
+ additionalProperties: false
+ description: |
+ Configure a PodDisruptionBudget for this Deployment.
+
+      These are disabled by default for our deployments that don't support
+      being run in parallel with multiple replicas. Only the user-scheduler
+      currently supports being run in parallel with multiple replicas. If
+      a PodDisruptionBudget is enabled for a Deployment with only one
+      replica, it will for example block `kubectl drain` of a node.
+
+      Note that if you aim to block the scale-down of a node running the
+      hub/proxy/autohttps pod, because that would disrupt the deployment,
+      you should instead annotate the pods of the Deployment [as described
+      here](https://github.com/kubernetes/autoscaler/blob/HEAD/cluster-autoscaler/FAQ.md#what-types-of-pods-can-prevent-ca-from-removing-a-node).
+
+      "cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/)
+ for more details about disruptions.
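+
+      As an example, a sketch enabling a PodDisruptionBudget for the
+      user-scheduler, which can run with multiple replicas:
+
+      ```yaml
+      scheduling:
+        userScheduler:
+          pdb:
+            enabled: true
+            maxUnavailable: 1
+      ```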
+ properties:
+ enabled:
+ type: boolean
+ description: |
+ Decides if a PodDisruptionBudget is created targeting the
+ Deployment's pods.
+ maxUnavailable:
+ type: [integer, "null"]
+ description: |
+ The maximum number of pods that can be unavailable during
+ voluntary disruptions.
+ minAvailable:
+ type: [integer, "null"]
+ description: |
+ The minimum number of pods required to be available during
+ voluntary disruptions.
+ existingSecret:
+ type: [string, "null"]
+ description: |
+      This option allows you to provide the name of an existing k8s Secret to
+      use alongside the chart-managed k8s Secret. The content of this k8s
+      Secret will be merged with the chart-managed k8s Secret, giving
+      priority to the self-managed k8s Secret.
+
+ ```{warning}
+      1. The self-managed k8s Secret must mirror the structure of the
+         chart-managed Secret.
+ 2. [`proxy.secretToken`](schema_proxy.secretToken) (aka.
+ `hub.config.ConfigurableHTTPProxy.auth_token`) is only read from
+ the chart managed k8s Secret.
+ ```
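+
+      As an example, a sketch referencing a hypothetical self-managed k8s
+      Secret:
+
+      ```yaml
+      hub:
+        existingSecret: my-existing-hub-secret
+      ```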
+ nodeSelector: &nodeSelector-spec
+ type: object
+ additionalProperties: true
+ description: |
+      An object with key-value pairs representing labels. K8s Nodes are
+      required to match all these labels for this Pod to be scheduled on
+      them.
+
+ ```yaml
+ disktype: ssd
+ nodetype: awesome
+ ```
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector)
+ for more details.
+ tolerations: &tolerations-spec
+ type: array
+ description: |
+      Tolerations allow a pod to be scheduled on nodes with taints. These
+      tolerations are added on top of the tolerations common to all pods of
+      their respective kind
+      ([scheduling.corePods.tolerations](schema_scheduling.corePods.tolerations),
+      [scheduling.userPods.tolerations](schema_scheduling.userPods.tolerations)).
+
+ Pass this field an array of
+ [`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#toleration-v1-core)
+ objects.
+
+ See the [Kubernetes
+ docs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)
+ for more info.
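+
+      As an example, a sketch adding a toleration for a hypothetical
+      dedicated-node taint:
+
+      ```yaml
+      hub:
+        tolerations:
+          - key: my-taint-key
+            operator: Equal
+            value: core
+            effect: NoSchedule
+      ```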
+ activeServerLimit:
+ type: [integer, "null"]
+ description: &jupyterhub-native-config-description |
+ JupyterHub native configuration, see the [JupyterHub
+ documentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html)
+ for more information.
+ allowNamedServers:
+ type: [boolean, "null"]
+ description: *jupyterhub-native-config-description
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ K8s annotations for the hub pod.
+ authenticatePrometheus:
+ type: [boolean, "null"]
+ description: *jupyterhub-native-config-description
+ concurrentSpawnLimit:
+ type: [integer, "null"]
+ description: *jupyterhub-native-config-description
+ consecutiveFailureLimit:
+ type: [integer, "null"]
+ description: *jupyterhub-native-config-description
+ podSecurityContext: &podSecurityContext-spec
+ additionalProperties: true
+ description: |
+ A k8s native specification of the pod's security context, see [the
+ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podsecuritycontext-v1-core)
+ for details.
+ containerSecurityContext: &containerSecurityContext-spec
+ type: object
+ additionalProperties: true
+ description: |
+ A k8s native specification of the container's security context, see [the
+ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core)
+ for details.
+ deploymentStrategy:
+ type: object
+ additionalProperties: false
+ properties:
+ rollingUpdate:
+ type: [string, "null"]
+ type:
+ type: [string, "null"]
+ description: |
+          JupyterHub does not support running in parallel; due to this, we
+          default to using a deployment strategy of Recreate.
+ extraContainers: &extraContainers-spec
+ type: array
+ description: |
+ Additional containers for the Pod. Use a k8s native syntax.
+ extraVolumeMounts: &extraVolumeMounts-spec
+ type: array
+ description: |
+ Additional volume mounts for the Container. Use a k8s native syntax.
+ extraVolumes: &extraVolumes-spec
+ type: array
+ description: |
+ Additional volumes for the Pod. Use a k8s native syntax.
+ livenessProbe: &probe-spec
+ type: object
+ additionalProperties: true
+ required: [enabled]
+ if:
+ properties:
+ enabled:
+ const: true
+ then:
+ description: |
+ This config option is like the k8s native specification of a
+ container probe, except that it also supports an `enabled` boolean
+ flag.
+
+ See [the k8s
+ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#probe-v1-core)
+ for more details.
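+
+          As an example, a sketch tweaking the hub's liveness probe, where
+          the timing values are illustrative:
+
+          ```yaml
+          hub:
+            livenessProbe:
+              enabled: true
+              initialDelaySeconds: 300
+              periodSeconds: 10
+          ```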
+ readinessProbe: *probe-spec
+ namedServerLimitPerUser:
+ type: [integer, "null"]
+ description: *jupyterhub-native-config-description
+ redirectToServer:
+ type: [boolean, "null"]
+ description: *jupyterhub-native-config-description
+ resources: &resources-spec
+ type: object
+ additionalProperties: true
+ description: |
+ A k8s native specification of resources, see [the
+ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core).
+ lifecycle: &lifecycle-spec
+ type: object
+ additionalProperties: false
+ description: |
+ A k8s native specification of lifecycle hooks on the container, see [the
+ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#lifecycle-v1-core).
+ properties:
+ postStart:
+ type: object
+ additionalProperties: true
+ preStop:
+ type: object
+ additionalProperties: true
+ services:
+ type: object
+ additionalProperties: true
+ description: |
+        This is where you register JupyterHub services. For details on how to
+        configure these services in this Helm chart, just keep reading; for
+        details on services themselves, instead read [JupyterHub's
+        documentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/service.html).
+
+ ```{note}
+ Only a selection of JupyterHub's configuration options that can be
+ configured for a service are documented below. All configuration set
+ here will be applied even if this Helm chart doesn't recognize it.
+ ```
+
+        While JupyterHub's native configuration accepts a list of service
+        objects, this Helm chart only accepts a dictionary where each key
+        represents the name of a service and the value is the actual service
+        object.
+
+ When configuring JupyterHub services via this Helm chart, the `name`
+ field can be omitted as it can be implied by the dictionary key.
+ Further, the `api_token` field can be omitted as it will be
+ automatically generated as of version 1.1.0 of this Helm chart.
+
+ If you have an external service that needs to access the automatically
+ generated api_token for the service, you can access it from the `hub`
+ k8s Secret part of this Helm chart under the key
+ `hub.services.my-service-config-key.apiToken`.
+
+ Here is an example configuration of two services where the first
+ explicitly sets a name and api_token, while the second omits those and
+ lets the name be implied from the key name and the api_token be
+ automatically generated.
+
+ ```yaml
+ hub:
+ services:
+ my-service-1:
+ admin: true
+ name: my-explicitly-set-service-name
+ api_token: my-explicitly-set-api_token
+
+ # the name of the following service will be my-service-2
+ # the api_token of the following service will be generated
+ my-service-2: {}
+ ```
+
+ If you develop a Helm chart depending on the JupyterHub Helm chart and
+ want to let some Pod's environment variable be populated with the
+ api_token of a service registered like above, then do something along
+ these lines.
+
+ ```yaml
+ # ... container specification of a pod ...
+ env:
+ - name: MY_SERVICE_1_API_TOKEN
+ valueFrom:
+ secretKeyRef:
+ # Don't hardcode the name, use the globally accessible
+ # named templates part of the JupyterHub Helm chart.
+ name: {{ include "jupyterhub.hub.fullname" . }}
+ # Note below the use of the configuration key my-service-1
+ # rather than the explicitly set service name.
+ key: hub.services.my-service-1.apiToken
+ ```
+ properties:
+ name:
+ type: string
+ description: |
+            The name can be implied via the key name under which this
+            service is configured, and may therefore be omitted in this
+            Helm chart's configuration of JupyterHub.
+ admin:
+ type: boolean
+ command:
+ type: [string, array]
+ url:
+ type: string
+ api_token:
+ type: [string, "null"]
+ description: |
+            The api_token will be automatically generated if not
+            explicitly set. It will also be exposed via a k8s Secret
+            that is part of this Helm chart, under a specific key.
+
+ See the documentation under
+ [`hub.services`](schema_hub.services) for details about this.
+ apiToken:
+ type: [string, "null"]
+ description: |
+ An alias for api_token provided for backward compatibility by
+ the JupyterHub Helm chart that will be transformed to
+ api_token.
+ loadRoles:
+ type: object
+ additionalProperties: true
+ description: |
+ This is where you should define JupyterHub roles and apply them to
+ JupyterHub users, groups, and services to grant them additional
+ permissions as defined in JupyterHub's RBAC system.
+
+ Complement this documentation with [JupyterHub's
+ documentation](https://jupyterhub.readthedocs.io/en/stable/rbac/roles.html#defining-roles)
+ about `load_roles`.
+
+ Note that while JupyterHub's native configuration `load_roles` accepts
+ a list of role objects, this Helm chart only accepts a dictionary where
+ each key represents the name of a role and the value is the actual
+ role object.
+
+ ```yaml
+ hub:
+ loadRoles:
+ teacher:
+ description: Access to users' information and group membership
+
+ # this role provides permissions to...
+ scopes: [users, groups]
+
+ # this role will be assigned to...
+ users: [erik]
+ services: [grading-service]
+ groups: [teachers]
+ ```
+
+ When configuring JupyterHub roles via this Helm chart, the `name`
+ field can be omitted as it can be implied by the dictionary key.
+ shutdownOnLogout:
+ type: [boolean, "null"]
+ description: *jupyterhub-native-config-description
+ templatePaths:
+ type: array
+ description: *jupyterhub-native-config-description
+ templateVars:
+ type: object
+ additionalProperties: true
+ description: *jupyterhub-native-config-description
+ serviceAccount: &serviceAccount
+ type: object
+ required: [create]
+ additionalProperties: false
+ description: |
+ Configuration for a k8s ServiceAccount dedicated for use by the
+ specific pod which this configuration is nested under.
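+
+      As an example, a sketch creating a dedicated ServiceAccount for the
+      hub pod, with a hypothetical cloud IAM annotation:
+
+      ```yaml
+      hub:
+        serviceAccount:
+          create: true
+          annotations:
+            my-cloud.example/iam-role: my-hub-role
+      ```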
+ properties:
+ create:
+ type: boolean
+ description: |
+ Whether or not to create the `ServiceAccount` resource.
+ name:
+ type: ["string", "null"]
+ description: |
+ This configuration serves multiple purposes:
+
+ - It will be the `serviceAccountName` referenced by related Pods.
+ - If `create` is set, the created ServiceAccount resource will be named like this.
+ - If [`rbac.create`](schema_rbac.create) is set, the associated (Cluster)RoleBindings will bind to this name.
+
+ If not explicitly provided, a default name will be used.
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Kubernetes annotations to apply to the k8s ServiceAccount.
+ extraPodSpec: &extraPodSpec-spec
+ type: object
+ additionalProperties: true
+ description: |
+      Arbitrary extra k8s pod specification as a YAML object. The default
+      value of this setting is an empty object, i.e. no extra configuration.
+      The value of this property is merged into the pod specification as-is.
+
+ This is a powerful tool for expert k8s administrators with advanced
+ configuration requirements. This setting should only be used for
+ configuration that cannot be accomplished through the other settings.
+ Misusing this setting can break your deployment and/or compromise
+ your system security.
+
+ This is one of four related settings for inserting arbitrary pod
+ specification:
+
+ 1. hub.extraPodSpec
+ 2. proxy.chp.extraPodSpec
+ 3. proxy.traefik.extraPodSpec
+ 4. scheduling.userScheduler.extraPodSpec
+
+ One real-world use of these settings is to enable host networking. For
+ example, to configure host networking for the hub pod, add the
+ following to your helm configuration values:
+
+ ```yaml
+ hub:
+ extraPodSpec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ ```
+
+ Likewise, to configure host networking for the proxy pod, add the
+ following:
+
+ ```yaml
+ proxy:
+ chp:
+ extraPodSpec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ ```
+
+      N.B. Host networking has special security implications and can easily
+      break your deployment. This is an example, not an endorsement.
+
+ See [PodSpec](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec)
+ for the latest pod resource specification.
+
+ proxy:
+ type: object
+ additionalProperties: false
+ properties:
+ chp:
+ type: object
+ additionalProperties: false
+ description: |
+ Configure the configurable-http-proxy (chp) pod managed by jupyterhub to route traffic
+ both to itself and to user pods.
+ properties:
+ revisionHistoryLimit: *revisionHistoryLimit
+ networkPolicy: *networkPolicy-spec
+ extraCommandLineFlags:
+ type: array
+ description: |
+ A list of strings to be added as command line options when
+ starting
+ [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy#command-line-options)
+ that will be expanded with Helm's template function `tpl` which
+ can render Helm template logic inside curly braces (`{{ ... }}`).
+
+ ```yaml
+ proxy:
+ chp:
+ extraCommandLineFlags:
+ - "--auto-rewrite"
+ - "--custom-header {{ .Values.myCustomStuff }}"
+ ```
+
+            Note that these will be appended last, and if you provide the same
+            flag twice, the last flag will be used, which means you can
+            override the default flag values as well.
+ extraEnv:
+ type: [object, array]
+ additionalProperties: true
+ description: |
+ Extra environment variables that should be set for the chp pod.
+
+ Environment variables are usually used here to:
+ - override HUB_SERVICE_PORT or HUB_SERVICE_HOST default values
+ - set CONFIGPROXY_SSL_KEY_PASSPHRASE for setting passphrase of SSL keys
+
+ String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which
+ is a part of Kubernetes.
+
+ ```yaml
+ proxy:
+ chp:
+ extraEnv:
+ # basic notation (for literal values only)
+ MY_ENV_VARS_NAME1: "my env var value 1"
+
+ # explicit notation (the "name" field takes precedence)
+ CHP_NAMESPACE:
+ name: CHP_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+
+ # implicit notation (the "name" field is implied)
+ PREFIXED_CHP_NAMESPACE:
+ value: "my-prefix-$(CHP_NAMESPACE)"
+ SECRET_VALUE:
+ valueFrom:
+ secretKeyRef:
+ name: my-k8s-secret
+ key: password
+ ```
+
+ For more information, see the [Kubernetes EnvVar
+ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
+ pdb: *pdb-spec
+ nodeSelector: *nodeSelector-spec
+ tolerations: *tolerations-spec
+ containerSecurityContext: *containerSecurityContext-spec
+ image: *image-spec
+ livenessProbe: *probe-spec
+ readinessProbe: *probe-spec
+ resources: *resources-spec
+ defaultTarget:
+ type: [string, "null"]
+ description: |
+ Override the URL for the default routing target for the proxy.
+ Defaults to JupyterHub itself.
+ This will generally only have an effect while JupyterHub is not running,
+ as JupyterHub adds itself as the default target after it starts.
+ errorTarget:
+ type: [string, "null"]
+ description: |
+ Override the URL for the error target for the proxy.
+ Defaults to JupyterHub itself.
+ Useful to reduce load on the Hub
+ or produce more informative error messages than the Hub's default,
+ e.g. in highly customized deployments such as BinderHub.
+ See Configurable HTTP Proxy for details on implementing an error target.
+ extraPodSpec: *extraPodSpec-spec
+ secretToken:
+ type: [string, "null"]
+ description: |
+ ```{note}
+ As of version 1.0.0 this will automatically be generated and there is
+ no need to set it manually.
+
+ If you wish to reset a generated key, you can use `kubectl edit` on
+ the k8s Secret typically named `hub` and remove the
+ `hub.config.ConfigurableHTTPProxy.auth_token` entry in the k8s Secret,
+ then perform a new `helm upgrade`.
+ ```
+
+ A 32-byte cryptographically secure randomly generated string used to
+ secure communications between the hub pod and the proxy pod running a
+ [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy)
+ instance.
+
+ ```sh
+ # to generate a value, run
+ openssl rand -hex 32
+ ```
+
+ Changing this value will cause the proxy and hub pods to restart. It is good security
+ practice to rotate these values over time. If this secret leaks, *immediately* change
+ it to something else, or user data can be compromised.
+ service:
+ type: object
+ additionalProperties: false
+ description: |
+ Configuration of the k8s Service `proxy-public` which either will
+ point to the `autohttps` pod running Traefik for TLS termination, or
+ the `proxy` pod running ConfigurableHTTPProxy. Incoming traffic from
+ users on the internet should always go through this k8s Service.
+
+ When this service targets the `autohttps` pod which then routes to the
+ `proxy` pod, a k8s Service named `proxy-http` will be added targeting
+ the `proxy` pod and only accepting HTTP traffic on port 80.
+ properties:
+ type:
+ enum: [ClusterIP, NodePort, LoadBalancer, ExternalName]
+ description: |
+              Defaults to `LoadBalancer`.
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)
+ to learn more about service types.
+ labels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra labels to add to the proxy service.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ to learn more about labels.
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Annotations to apply to the service that is exposing the proxy.
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+ for more details about annotations.
+ nodePorts:
+ type: object
+ additionalProperties: false
+ description: |
+ Object to set NodePorts to expose the service on for http and https.
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport)
+ for more details about NodePorts.
+ properties:
+ http:
+ type: [integer, "null"]
+ description: |
+ The HTTP port the proxy-public service should be exposed on.
+ https:
+ type: [integer, "null"]
+ description: |
+ The HTTPS port the proxy-public service should be exposed on.
+ disableHttpPort:
+ type: boolean
+ description: |
+              Defaults to `false`.
+
+ If `true`, port 80 for incoming HTTP traffic will no longer be exposed. This should not be used with `proxy.https.type=letsencrypt` or `proxy.https.enabled=false` as it would remove the only exposed port.
+ extraPorts:
+ type: array
+ description: |
+              Extra ports the k8s Service should accept incoming traffic on,
+              which will be redirected to either the `autohttps` pod (traefik)
+              or the `proxy` pod (chp).
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#serviceport-v1-core)
+ for the structure of the items in this list.
+ loadBalancerIP:
+ type: [string, "null"]
+ description: |
+ The public IP address the proxy-public Kubernetes service should
+ be exposed on. This entry will end up at the configurable proxy
+ server that JupyterHub manages, which will direct traffic to user
+ pods at the `/user` path and the hub pod at the `/hub` path.
+
+ Set this if you want to use a fixed external IP address instead of
+ a dynamically acquired one. This is relevant if you have a domain
+ name that you want to point to a specific IP and want to ensure it
+ doesn't change.
+ loadBalancerSourceRanges:
+ type: array
+ description: |
+ A list of IP CIDR ranges that are allowed to access the load balancer service.
+ Defaults to allowing everyone to access it.
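+
+            As an example, a sketch restricting access to an illustrative
+            private CIDR range:
+
+            ```yaml
+            proxy:
+              service:
+                loadBalancerSourceRanges:
+                  - 192.168.0.0/16
+            ```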
+ https:
+ type: object
+ additionalProperties: false
+ description: |
+            Object for customizing the settings for HTTPS used by JupyterHub's proxy.
+            For more information on configuring HTTPS for your JupyterHub, see the [HTTPS section in our security guide](https).
+ properties:
+ enabled:
+ type: [boolean, "null"]
+ description: |
+ Indicator to set whether HTTPS should be enabled or not on the proxy. Defaults to `true` if the https object is provided.
+ type:
+ enum: [null, "", letsencrypt, manual, offload, secret]
+ description: |
+ The type of HTTPS encryption that is used.
+ Decides on which ports and network policies are used for communication via HTTPS. Setting this to `secret` sets the type to manual HTTPS with a secret that has to be provided in the `https.secret` object.
+ Defaults to `letsencrypt`.
+ letsencrypt:
+ type: object
+ additionalProperties: false
+ properties:
+ contactEmail:
+ type: [string, "null"]
+ description: |
+ The contact email to be used for automatically provisioned HTTPS certificates by Let's Encrypt. For more information see [Set up automatic HTTPS](setup-automatic-https).
+ Required for automatic HTTPS.
+ acmeServer:
+ type: [string, "null"]
+ description: |
+ Let's Encrypt is one of various ACME servers that can provide
+ a certificate, and by default their production server is used.
+
+                  Let's Encrypt staging: https://acme-staging-v02.api.letsencrypt.org/directory
+                  Let's Encrypt production: https://acme-v02.api.letsencrypt.org/directory
+ manual:
+ type: object
+ additionalProperties: false
+ description: |
+                Object for providing your own certificates for manual HTTPS configuration. To be provided when setting `https.type` to `manual`.
+                See [Set up manual HTTPS](setup-manual-https).
+ properties:
+ key:
+ type: [string, "null"]
+ description: |
+ The RSA private key to be used for HTTPS.
+ To be provided in the form of
+
+ ```
+ key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ ...
+ -----END RSA PRIVATE KEY-----
+ ```
+ cert:
+ type: [string, "null"]
+ description: |
+ The certificate to be used for HTTPS.
+ To be provided in the form of
+
+ ```
+ cert: |
+ -----BEGIN CERTIFICATE-----
+ ...
+ -----END CERTIFICATE-----
+ ```
+ secret:
+ type: object
+ additionalProperties: false
+ description: |
+ Secret to be provided when setting `https.type` to `secret`.
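+
+                As an example, a sketch referencing a hypothetical TLS
+                Secret:
+
+                ```yaml
+                proxy:
+                  https:
+                    type: secret
+                    secret:
+                      name: my-tls-secret
+                      key: tls.key
+                      crt: tls.crt
+                ```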
+ properties:
+ name:
+ type: [string, "null"]
+ description: |
+ Name of the secret
+ key:
+ type: [string, "null"]
+ description: |
+ Path to the private key to be used for HTTPS.
+ Example: `'tls.key'`
+ crt:
+ type: [string, "null"]
+ description: |
+ Path to the certificate to be used for HTTPS.
+ Example: `'tls.crt'`
+ hosts:
+ type: array
+ description: |
+              Your domain in list form.
+ Required for automatic HTTPS. See [Set up automatic HTTPS](setup-automatic-https).
+ To be provided like:
+ ```
+ hosts:
+                - <your-domain-name>
+ ```
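+
+              As an example, a sketch of an automatic HTTPS setup, where
+              the domain and email are placeholders:
+
+              ```yaml
+              proxy:
+                https:
+                  enabled: true
+                  type: letsencrypt
+                  letsencrypt:
+                    contactEmail: <your-email-address>
+                  hosts:
+                    - <your-domain-name>
+              ```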
+ traefik:
+ type: object
+ additionalProperties: false
+ description: |
+          Configure the traefik proxy used to terminate TLS when 'autohttps' is enabled.
+ properties:
+ revisionHistoryLimit: *revisionHistoryLimit
+ labels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra labels to add to the traefik pod.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ to learn more about labels.
+ networkPolicy: *networkPolicy-spec
+ extraInitContainers:
+ type: array
+ description: |
+            List of extra initContainers to be run in the traefik pod, after the initContainers set by the chart. See the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/).
+
+ ```yaml
+ proxy:
+ traefik:
+ extraInitContainers:
+ - name: init-myservice
+ image: busybox:1.28
+ command: ['sh', '-c', 'command1']
+ - name: init-mydb
+ image: busybox:1.28
+ command: ['sh', '-c', 'command2']
+ ```
+ extraEnv:
+ type: [object, array]
+ additionalProperties: true
+ description: |
+ Extra environment variables that should be set for the traefik pod.
+
+ Environment Variables here may be used to configure traefik.
+
+ String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which
+ is a part of Kubernetes.
+
+ ```yaml
+ proxy:
+ traefik:
+ extraEnv:
+ # basic notation (for literal values only)
+ MY_ENV_VARS_NAME1: "my env var value 1"
+
+ # explicit notation (the "name" field takes precedence)
+ TRAEFIK_NAMESPACE:
+ name: TRAEFIK_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+
+ # implicit notation (the "name" field is implied)
+ PREFIXED_TRAEFIK_NAMESPACE:
+ value: "my-prefix-$(TRAEFIK_NAMESPACE)"
+ SECRET_VALUE:
+ valueFrom:
+ secretKeyRef:
+ name: my-k8s-secret
+ key: password
+ ```
+
+ For more information, see the [Kubernetes EnvVar
+ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
+ pdb: *pdb-spec
+ nodeSelector: *nodeSelector-spec
+ tolerations: *tolerations-spec
+ containerSecurityContext: *containerSecurityContext-spec
+ extraDynamicConfig:
+ type: object
+ additionalProperties: true
+ description: |
+ This refers to traefik's post-startup configuration.
+
+            This Helm chart already provides such configuration, so this is a
+ place where you can merge in additional configuration. If you are
+ about to use this configuration, you may want to inspect the
+ default configuration declared
+ [here](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/HEAD/jupyterhub/templates/proxy/autohttps/_configmap-dynamic.yaml).
+ extraPorts:
+ type: array
+ description: |
+ Extra ports for the traefik container within the autohttps pod
+ that you would like to expose, formatted in a k8s native way.
+ extraStaticConfig:
+ type: object
+ additionalProperties: true
+ description: |
+ This refers to traefik's startup configuration.
+
+            This Helm chart already provides such configuration, so this is a
+ place where you can merge in additional configuration. If you are
+ about to use this configuration, you may want to inspect the
+ default configuration declared
+ [here](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/HEAD/jupyterhub/templates/proxy/autohttps/_configmap-traefik.yaml).
+ extraVolumes: *extraVolumes-spec
+ extraVolumeMounts: *extraVolumeMounts-spec
+ hsts:
+ type: object
+ additionalProperties: false
+ required: [includeSubdomains, maxAge, preload]
+ description: |
+            This section regards an HTTP Strict-Transport-Security (HSTS)
+            response header. It can act as a request for visiting web
+            browsers to enforce HTTPS on their end for a given time into
+            the future, and optionally also for future requests to subdomains.
+
+ These settings relate to traefik configuration which we use as a
+ TLS termination proxy.
+
+ See [Mozilla's
+ documentation](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security)
+ for more information.
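+
+            As an example, a sketch setting HSTS for about half a year,
+            without subdomains or preload (the values are illustrative):
+
+            ```yaml
+            proxy:
+              traefik:
+                hsts:
+                  includeSubdomains: false
+                  maxAge: 15724800
+                  preload: false
+            ```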
+ properties:
+ includeSubdomains:
+ type: boolean
+ maxAge:
+ type: integer
+ preload:
+ type: boolean
+ image: *image-spec
+ resources: *resources-spec
+ serviceAccount: *serviceAccount
+ extraPodSpec: *extraPodSpec-spec
+ labels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ K8s labels for the proxy pod.
+
+ ```{note}
+ For consistency, this should really be located under
+ proxy.chp.labels but isn't for historical reasons.
+ ```
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ K8s annotations for the proxy pod.
+
+ ```{note}
+ For consistency, this should really be located under
+ proxy.chp.annotations but isn't for historical reasons.
+ ```
+ deploymentStrategy:
+ type: object
+ additionalProperties: false
+ properties:
+ rollingUpdate:
+ type: [string, "null"]
+ type:
+ type: [string, "null"]
+ description: |
+ While the proxy pod running
+ [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy)
+ could run in parallel, two instances running in parallel wouldn't
+ both receive updates from JupyterHub regarding how it should route
+ traffic. Due to this we default to using a deployment strategy of
+ Recreate instead of RollingUpdate.
+ secretSync:
+ type: object
+ additionalProperties: false
+ description: |
+ This configuration section refers to configuration of the sidecar
+ container in the autohttps pod running next to its traefik container
+ responsible for TLS termination.
+
+ The purpose of this container is to store away and load TLS
+ certificates from a k8s Secret. The TLS certificates are acquired by
+ the ACME client (LEGO) that is running within the traefik container,
+ where traefik is using them for TLS termination.
+ properties:
+ containerSecurityContext: *containerSecurityContext-spec
+ image: *image-spec
+ resources: *resources-spec
+
+ singleuser:
+ type: object
+ additionalProperties: false
+ description: |
+ Options for customizing the environment that is provided to the users after they log in.
+ properties:
+ networkPolicy: *networkPolicy-spec
+ podNameTemplate:
+ type: [string, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.pod_name_template](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.pod_name_template).
+ cpu:
+ type: object
+ additionalProperties: false
+ description: |
+ Set CPU limits & guarantees that are enforced for each user.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
+ for more info.
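+
+        For example, a sketch guaranteeing each user half a CPU core while
+        capping them at four (the values are illustrative):
+
+        ```yaml
+        singleuser:
+          cpu:
+            limit: 4
+            guarantee: 0.5
+        ```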
+ properties:
+ limit:
+ type: [number, "null"]
+ guarantee:
+ type: [number, "null"]
+ memory:
+ type: object
+ additionalProperties: false
+ description: |
+ Set Memory limits & guarantees that are enforced for each user.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
+ for more info.
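+
+        For example, a sketch guaranteeing each user 1G of memory while
+        capping them at 4G (the values are illustrative):
+
+        ```yaml
+        singleuser:
+          memory:
+            limit: 4G
+            guarantee: 1G
+        ```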
+ properties:
+ limit:
+ type: [number, string, "null"]
+ guarantee:
+ type: [number, string, "null"]
+ description: |
+ Note that this field is referred to as *requests* by the Kubernetes API.
+ image: *image-spec
+ initContainers:
+ type: array
+ description: |
+        List of initContainers to be run in every singleuser pod. See the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/).
+
+ ```yaml
+ singleuser:
+ initContainers:
+ - name: init-myservice
+ image: busybox:1.28
+ command: ['sh', '-c', 'command1']
+ - name: init-mydb
+ image: busybox:1.28
+ command: ['sh', '-c', 'command2']
+ ```
+ profileList:
+ type: array
+ description: |
+ For more information about the profile list, see [KubeSpawner's
+ documentation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner)
+ as this is simply a passthrough to that configuration.
+
+ ```{note}
+        The image-pullers are aware of the overrides of images in
+        `singleuser.profileList`, but they won't be if you configure it via
+        JupyterHub's `c.KubeSpawner.profile_list` configuration directly.
+ ```
+
+ ```yaml
+ singleuser:
+ profileList:
+ - display_name: "Default: Shared, 8 CPU cores"
+ description: "Your code will run on a shared machine with CPU only."
+ default: True
+ - display_name: "Personal, 4 CPU cores & 26GB RAM, 1 NVIDIA Tesla K80 GPU"
+            description: "Your code will run on a personal machine with a GPU."
+ kubespawner_override:
+ extra_resource_limits:
+ nvidia.com/gpu: "1"
+ ```
+ extraFiles: *extraFiles
+ extraEnv:
+ type: [object, array]
+ additionalProperties: true
+ description: |
+ Extra environment variables that should be set for the user pods.
+
+ String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which
+ is a part of Kubernetes. Note that the user pods will already have
+ access to a set of environment variables that you can use, like
+ `JUPYTERHUB_USER` and `JUPYTERHUB_HOST`. For more information about these
+ inspect [this source
+ code](https://github.com/jupyterhub/jupyterhub/blob/cc8e7806530466dce8968567d1bbd2b39a7afa26/jupyterhub/spawner.py#L763).
+
+ ```yaml
+ singleuser:
+ extraEnv:
+ # basic notation (for literal values only)
+ MY_ENV_VARS_NAME1: "my env var value 1"
+
+ # explicit notation (the "name" field takes precedence)
+ USER_NAMESPACE:
+ name: USER_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+
+ # implicit notation (the "name" field is implied)
+ PREFIXED_USER_NAMESPACE:
+ value: "my-prefix-$(USER_NAMESPACE)"
+ SECRET_VALUE:
+ valueFrom:
+ secretKeyRef:
+ name: my-k8s-secret
+ key: password
+ ```
+
+ For more information, see the [Kubernetes EnvVar
+ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
+ nodeSelector: *nodeSelector-spec
+ extraTolerations: *tolerations-spec
+ extraNodeAffinity:
+ type: object
+ additionalProperties: false
+ description: |
+        Affinities describe where pods prefer or require to be scheduled. They
+        may prefer or require a node where they are to be scheduled to have a
+        certain label (node affinity). They may also require to be scheduled
+        in proximity, or with a lack of proximity, to another pod (pod affinity
+        and pod anti-affinity).
+
+ See the [Kubernetes
+ docs](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)
+ for more info.
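+
+        As an example, a sketch requiring user pods to be scheduled on nodes
+        carrying a hypothetical node pool label:
+
+        ```yaml
+        singleuser:
+          extraNodeAffinity:
+            required:
+              - matchExpressions:
+                  - key: my-node-pool-label
+                    operator: In
+                    values: [user-pool]
+        ```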
+ properties:
+ required:
+ type: array
+ description: |
+ Pass this field an array of
+ [`NodeSelectorTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#nodeselectorterm-v1-core)
+ objects.
+ preferred:
+ type: array
+ description: |
+ Pass this field an array of
+ [`PreferredSchedulingTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#preferredschedulingterm-v1-core)
+ objects.
+ extraPodAffinity:
+ type: object
+ additionalProperties: false
+ description: |
+ See the description of `singleuser.extraNodeAffinity`.
+ properties:
+ required:
+ type: array
+ description: |
+ Pass this field an array of
+ [`PodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podaffinityterm-v1-core)
+ objects.
+ preferred:
+ type: array
+ description: |
+ Pass this field an array of
+ [`WeightedPodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#weightedpodaffinityterm-v1-core)
+ objects.
+ extraPodAntiAffinity:
+ type: object
+ additionalProperties: false
+ description: |
+ See the description of `singleuser.extraNodeAffinity`.
+ properties:
+ required:
+ type: array
+ description: |
+ Pass this field an array of
+ [`PodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podaffinityterm-v1-core)
+ objects.
+ preferred:
+ type: array
+ description: |
+ Pass this field an array of
+ [`WeightedPodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#weightedpodaffinityterm-v1-core)
+ objects.
+ cloudMetadata:
+ type: object
+ additionalProperties: false
+ required: [blockWithIptables, ip]
+ description: |
+ Please refer to dedicated section in [the Helm chart
+ documentation](block-metadata-iptables) for more information about
+ this.
+ properties:
+ blockWithIptables:
+ type: boolean
+ ip:
+ type: string
+
+ cmd:
+ type: [array, string, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.cmd](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.cmd).
+        The default is "jupyterhub-singleuser".
+        Use `cmd: null` to launch a custom CMD from the image,
+        which must eventually launch jupyterhub-singleuser or an equivalent
+        process. Jupyter's docker-stacks images do this, for example.
+ defaultUrl:
+ type: [string, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.default_url](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.default_url).
+ # FIXME: name mismatch, named events_enabled in kubespawner
+ events:
+ type: [boolean, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.events_enabled](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.events_enabled).
+ extraAnnotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.extra_annotations](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_annotations).
+ extraContainers:
+ type: array
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.extra_containers](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_containers).
+ extraLabels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.extra_labels](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_labels).
+ extraPodConfig:
+ type: object
+ additionalProperties: true
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.extra_pod_config](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_pod_config).
+ extraResource:
+ type: object
+ additionalProperties: false
+ properties:
+ # FIXME: name mismatch, named extra_resource_guarantees in kubespawner
+ guarantees:
+ type: object
+ additionalProperties: true
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.extra_resource_guarantees](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_resource_guarantees).
+ # FIXME: name mismatch, named extra_resource_limits in kubespawner
+ limits:
+ type: object
+ additionalProperties: true
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.extra_resource_limits](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_resource_limits).
+ fsGid:
+ type: [integer, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.fs_gid](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.fs_gid).
+ lifecycleHooks:
+ type: object
+ additionalProperties: false
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.lifecycle_hooks](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.lifecycle_hooks).
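+
+        As an example, a sketch copying files into the home directory when
+        a server starts (the paths are illustrative):
+
+        ```yaml
+        singleuser:
+          lifecycleHooks:
+            postStart:
+              exec:
+                command: ["cp", "-a", "/srv/src-files/.", "/home/jovyan/"]
+        ```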
+ properties:
+ postStart:
+ type: object
+ additionalProperties: true
+ preStop:
+ type: object
+ additionalProperties: true
+ networkTools:
+ type: object
+ additionalProperties: false
+        This configuration section refers to configuration of a conditionally
+        created initContainer for the user pods, whose purpose is to block a
+        specific IP address.
+ specific IP address.
+
+ This initContainer will be created if
+ [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)
+ is set to true.
+ properties:
+ image: *image-spec
+ resources: *resources-spec
+ # FIXME: name mismatch, named service_account in kubespawner
+ serviceAccountName:
+ type: [string, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.service_account](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.service_account).
+ startTimeout:
+ type: [integer, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.start_timeout](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.start_timeout).
+ storage:
+ type: object
+ additionalProperties: false
+ required: [type, homeMountPath]
+ description: |
+ This section configures KubeSpawner directly to some extent but also
+ indirectly through Helm chart specific configuration options such as
+ [`singleuser.storage.type`](schema_singleuser.storage.type).
+ properties:
+ capacity:
+ type: [string, "null"]
+ description: |
+ Configures `KubeSpawner.storage_capacity`.
+
+ See the [KubeSpawner
+ documentation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html)
+ for more information.
+ dynamic:
+ type: object
+ additionalProperties: false
+ properties:
+ pvcNameTemplate:
+ type: [string, "null"]
+ description: |
+ Configures `KubeSpawner.pvc_name_template` which will be the
+ resource name of the PVC created by KubeSpawner for each user
+ if needed.
+ storageAccessModes:
+ type: array
+ items:
+ type: [string, "null"]
+ description: |
+ Configures `KubeSpawner.storage_access_modes`.
+
+              See KubeSpawner's documentation and [the k8s
+ documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes)
+ for more information.
+ storageClass:
+ type: [string, "null"]
+ description: |
+ Configures `KubeSpawner.storage_class`, which can be an
+ explicit StorageClass to dynamically provision storage for the
+ PVC that KubeSpawner will create.
+
+              A default StorageClass is typically available in k8s clusters
+              and will be used if this is unspecified.
+ volumeNameTemplate:
+ type: [string, "null"]
+ description: |
+              Configures `KubeSpawner.volume_name_template`, which is the
+              name to reference from the container's volumeMounts section.
+ extraLabels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Configures `KubeSpawner.storage_extra_labels`. Note that these
+ labels are set on the PVC during creation only and won't be
+ updated after creation.
+ extraVolumeMounts: *extraVolumeMounts-spec
+ extraVolumes: *extraVolumes-spec
+ homeMountPath:
+ type: string
+ description: |
+ The location within the container where the home folder storage
+ should be mounted.
+ static:
+ type: object
+ additionalProperties: false
+ properties:
+ pvcName:
+ type: [string, "null"]
+ description: |
+ Configures `KubeSpawner.pvc_claim_name` to reference
+ pre-existing storage.
+ subPath:
+ type: [string, "null"]
+ description: |
+ Configures the `subPath` field of a
+ `KubeSpawner.volume_mounts` entry added by the Helm chart.
+
+ Path within the volume from which the container's volume
+ should be mounted.
+ type:
+ enum: [dynamic, static, none]
+ description: |
+ Decide if you want storage to be provisioned dynamically
+ (dynamic), or if you want to attach existing storage (static), or
+ don't want any storage to be attached (none).
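+
+              A hypothetical example, attaching pre-existing storage (the
+              claim name `my-existing-pvc` is illustrative):
+
+              ```yaml
+              singleuser:
+                storage:
+                  type: static
+                  static:
+                    pvcName: my-existing-pvc
+                    subPath: "{username}"
+              ```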
+ allowPrivilegeEscalation:
+ type: [boolean, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.allow_privilege_escalation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.allow_privilege_escalation).
+ uid:
+ type: [integer, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.uid](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.uid).
+
+            This dictates what user the main container will start up as.
+
+ As an example of when this is needed, consider if you want to enable
+ sudo rights for some of your users. This can be done by starting up as
+ root, enabling it from the container in a startup script, and then
+ transitioning to the normal user.
+
+ Default is 1000, set to null to use the container's default.
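+
+            For example, to defer to the image's own default user:
+
+            ```yaml
+            singleuser:
+              uid: null
+            ```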
+
+ scheduling:
+ type: object
+ additionalProperties: false
+ description: |
+ Objects for customizing the scheduling of various pods on the nodes and
+ related labels.
+ properties:
+ userScheduler:
+ type: object
+ additionalProperties: false
+ required: [enabled, plugins, pluginConfig, logLevel]
+ description: |
+          The user scheduler makes sure that user pods are scheduled tightly
+          on nodes; this is useful for autoscaling of user node pools.
+ properties:
+ enabled:
+ type: boolean
+ description: |
+ Enables the user scheduler.
+ revisionHistoryLimit: *revisionHistoryLimit
+ replicas:
+ type: integer
+ description: |
+ You can have multiple schedulers to share the workload or improve
+ availability on node failure.
+ image: *image-spec
+ pdb: *pdb-spec
+ nodeSelector: *nodeSelector-spec
+ tolerations: *tolerations-spec
+ labels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra labels to add to the userScheduler pods.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ to learn more about labels.
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra annotations to add to the user-scheduler pods.
+ containerSecurityContext: *containerSecurityContext-spec
+ logLevel:
+ type: integer
+ description: |
+ Corresponds to the verbosity level of logging made by the
+ kube-scheduler binary running within the user-scheduler pod.
+ plugins:
+ type: object
+ additionalProperties: true
+ description: |
+              These plugins refer to kube-scheduler plugins as documented
+              [here](https://kubernetes.io/docs/reference/scheduling/config/).
+
+              The user-scheduler is really just a kube-scheduler configured
+              to pack users tightly on nodes using these plugins. See
+              values.yaml for information about the default plugins.
+ pluginConfig:
+ type: array
+ description: |
+ Individually activated plugins can be configured further.
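+
+              For example, this chart's default configuration (see
+              values.yaml) tunes the NodeResourcesFit plugin to score with a
+              MostAllocated strategy:
+
+              ```yaml
+              scheduling:
+                userScheduler:
+                  pluginConfig:
+                    - name: NodeResourcesFit
+                      args:
+                        scoringStrategy:
+                          type: MostAllocated
+                          resources:
+                            - name: cpu
+                              weight: 1
+                            - name: memory
+                              weight: 1
+              ```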
+ resources: *resources-spec
+ serviceAccount: *serviceAccount
+ extraPodSpec: *extraPodSpec-spec
+ podPriority:
+ type: object
+ additionalProperties: false
+ description: |
+          Pod Priority is used to allow real users to evict user placeholder
+          pods, which in turn, by entering a Pending state, can trigger a
+          scale-up by a cluster autoscaler.
+
+          Having this option enabled only makes sense if the following
+          conditions are met:
+
+ 1. A cluster autoscaler is installed.
+          2. user-placeholder pods are configured to have a priority equal or
+ higher than the cluster autoscaler's "priority cutoff" so that the
+ cluster autoscaler scales up a node in advance for a pending user
+ placeholder pod.
+ 3. Normal user pods have a higher priority than the user-placeholder
+ pods.
+ 4. Image puller pods have a priority between normal user pods and
+ user-placeholder pods.
+
+          Note that if the default priority cutoff is not configured on the
+          cluster autoscaler, it will currently default to 0, and in the
+          future this is meant to be lowered. If your cloud provider is installing the
+ cluster autoscaler for you, they may also configure this specifically.
+
+ Recommended settings for a cluster autoscaler...
+
+ ... with a priority cutoff of -10 (GKE):
+
+ ```yaml
+ podPriority:
+ enabled: true
+ globalDefault: false
+ defaultPriority: 0
+ imagePullerPriority: -5
+ userPlaceholderPriority: -10
+ ```
+
+ ... with a priority cutoff of 0:
+
+ ```yaml
+ podPriority:
+ enabled: true
+ globalDefault: true
+ defaultPriority: 10
+ imagePullerPriority: 5
+ userPlaceholderPriority: 0
+ ```
+ properties:
+ enabled:
+ type: boolean
+ globalDefault:
+ type: boolean
+ description: |
+ Warning! This will influence all pods in the cluster.
+
+            The priority a pod usually gets is 0. But this can be overridden
+            with a PriorityClass resource if it is declared to be the global
+            default. This configuration option allows for the creation of
+            such a global default.
+ defaultPriority:
+ type: integer
+ description: |
+ The actual value for the default pod priority.
+ imagePullerPriority:
+ type: integer
+ description: |
+ The actual value for the [hook|continuous]-image-puller pods' priority.
+ userPlaceholderPriority:
+ type: integer
+ description: |
+ The actual value for the user-placeholder pods' priority.
+ userPlaceholder:
+ type: object
+ additionalProperties: false
+ description: |
+          User placeholders simulate users but will, thanks to PodPriority,
+          be evicted by the cluster autoscaler if a real user shows up. In
+          this way placeholders allow you to create headroom for the real
+          users and reduce the risk of a user having to wait for a node to be
+          added. Be sure to use the continuous image puller as well along
+          with placeholders, so the images are also available when real users
+          arrive.
+
+ To test your setup efficiently, you can adjust the amount of user
+ placeholders with the following command:
+ ```sh
+ # Configure to have 3 user placeholders
+ kubectl scale sts/user-placeholder --replicas=3
+ ```
+ properties:
+ enabled:
+ type: boolean
+ image: *image-spec
+ revisionHistoryLimit: *revisionHistoryLimit
+ replicas:
+ type: integer
+ description: |
+ How many placeholder pods would you like to have?
+ labels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra labels to add to the userPlaceholder pods.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ to learn more about labels.
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra annotations to add to the placeholder pods.
+ resources:
+ type: object
+ additionalProperties: true
+ description: |
+ Unless specified here, the placeholder pods will request the same
+ resources specified for the real singleuser pods.
+ containerSecurityContext: *containerSecurityContext-spec
+ corePods:
+ type: object
+ additionalProperties: false
+ description: |
+          These settings influence all pods considered core pods, namely:
+
+ - hub
+ - proxy
+ - autohttps
+ - hook-image-awaiter
+ - user-scheduler
+
+          By default, the tolerations are:
+
+ - hub.jupyter.org/dedicated=core:NoSchedule
+ - hub.jupyter.org_dedicated=core:NoSchedule
+
+          Note that tolerations set here are combined with the respective
+          component's dedicated tolerations, and that `_` is available in
+          case `/` isn't allowed in the cloud's tolerations.
+ properties:
+ tolerations: *tolerations-spec
+ nodeAffinity:
+ type: object
+ additionalProperties: false
+ description: |
+            Where should pods be scheduled? Perhaps nodes with a certain
+            label are preferred or even required?
+ properties:
+ matchNodePurpose:
+ enum: [ignore, prefer, require]
+ description: |
+ Decide if core pods *ignore*, *prefer* or *require* to
+ schedule on nodes with this label:
+ ```
+ hub.jupyter.org/node-purpose=core
+ ```
+ userPods:
+ type: object
+ additionalProperties: false
+ description: |
+ These settings influence all pods considered user pods, namely:
+
+ - user-placeholder
+ - hook-image-puller
+ - continuous-image-puller
+          - jupyter-<username>
+
+          By default, the tolerations are:
+
+          - hub.jupyter.org/dedicated=user:NoSchedule
+          - hub.jupyter.org_dedicated=user:NoSchedule
+
+          Note that tolerations set here are combined with the respective
+          component's dedicated tolerations, and that `_` is available in
+          case `/` isn't allowed in the cloud's tolerations.
+ properties:
+ tolerations: *tolerations-spec
+ nodeAffinity:
+ type: object
+ additionalProperties: false
+ description: |
+            Where should pods be scheduled? Perhaps nodes with a certain
+            label are preferred or even required?
+ properties:
+ matchNodePurpose:
+ enum: [ignore, prefer, require]
+ description: |
+ Decide if user pods *ignore*, *prefer* or *require* to
+ schedule on nodes with this label:
+ ```
+ hub.jupyter.org/node-purpose=user
+ ```
+
+ ingress:
+ type: object
+ additionalProperties: false
+ required: [enabled]
+ properties:
+ enabled:
+ type: boolean
+ description: |
+ Enable the creation of a Kubernetes Ingress to proxy-public service.
+
+ See [Advanced Topics — Zero to JupyterHub with Kubernetes
+ 0.7.0 documentation](ingress)
+ for more details.
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Annotations to apply to the Ingress resource.
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+ for more details about annotations.
+ ingressClassName:
+ type: [string, "null"]
+ description: |
+        Maps directly to the Ingress resource's `spec.ingressClassName`.
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class)
+ for more details.
+ hosts:
+ type: array
+ description: |
+ List of hosts to route requests to the proxy.
+ pathSuffix:
+ type: [string, "null"]
+ description: |
+ Suffix added to Ingress's routing path pattern.
+
+ Specify `*` if your ingress matches path by glob pattern.
+ pathType:
+ enum: [Prefix, Exact, ImplementationSpecific]
+ description: |
+ The path type to use. The default value is 'Prefix'.
+
+ See [the Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types)
+ for more details about path types.
+ tls:
+ type: array
+ description: |
+ TLS configurations for Ingress.
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls)
+        for more details about TLS.
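+
+        A hypothetical example (the hostname, class name, and secret name
+        are illustrative):
+
+        ```yaml
+        ingress:
+          enabled: true
+          ingressClassName: nginx
+          hosts: [hub.example.com]
+          tls:
+            - hosts: [hub.example.com]
+              secretName: hub-example-tls
+        ```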
+
+ prePuller:
+ type: object
+ additionalProperties: false
+ required: [hook, continuous]
+ properties:
+ revisionHistoryLimit: *revisionHistoryLimit
+ labels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra labels to add to the pre puller job pods.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ to learn more about labels.
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+        Annotations to apply to the hook and continuous image puller pods.
+        One example use case is to disable istio sidecars which could
+        interfere with the image pulling.
+ resources:
+ type: object
+ additionalProperties: true
+ description: |
+        These are standard Kubernetes resources with requests and limits for
+        cpu and memory. They will be used on the containers in the pods
+        pulling images. These should be set extremely low as the containers
+        either shut down directly or run a pause container that just idles.
+
+ They were made configurable as usage of ResourceQuota may require
+ containers in the namespace to have explicit resources set.
+ extraTolerations: *tolerations-spec
+ hook:
+ type: object
+ additionalProperties: false
+ required: [enabled]
+ description: |
+ See the [*optimization
+ section*](pulling-images-before-users-arrive)
+ for more details.
+ properties:
+ enabled:
+ type: boolean
+ pullOnlyOnChanges:
+ type: boolean
+ description: |
+ Pull only if changes have been made to the images to pull, or more
+ accurately if the hook-image-puller daemonset has changed in any
+ way.
+ podSchedulingWaitDuration:
+ description: |
+            The `hook-image-awaiter` has a criterion to await all the
+            `hook-image-puller` DaemonSet's pods to both schedule and finish
+            their image pulling. This flag can be used to relax this
+            criterion to instead only await the pods that _have already
+            scheduled_ to finish image pulling after a certain duration.
+
+ The value of this is that sometimes the newly created
+ `hook-image-puller` pods cannot be scheduled because nodes are
+ full, and then it probably won't make sense to block a `helm
+ upgrade`.
+
+ An infinite duration to wait for pods to schedule can be
+ represented by `-1`. This was the default behavior of version
+ 0.9.0 and earlier.
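+
+            For example, to only await the already-scheduled puller pods
+            after 10 (assumed here to mean seconds, matching this chart's
+            values.yaml default of 10):
+
+            ```yaml
+            prePuller:
+              hook:
+                podSchedulingWaitDuration: 10
+            ```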
+ type: integer
+ nodeSelector: *nodeSelector-spec
+ tolerations: *tolerations-spec
+ containerSecurityContext: *containerSecurityContext-spec
+ image: *image-spec
+ resources: *resources-spec
+ serviceAccount: *serviceAccount
+ continuous:
+ type: object
+ additionalProperties: false
+ required: [enabled]
+ description: |
+ See the [*optimization
+ section*](pulling-images-before-users-arrive)
+ for more details.
+
+ ```{note}
+ If used with a Cluster Autoscaler (an autoscaling node pool), also add
+ user-placeholders and enable pod priority.
+ ```
+ properties:
+ enabled:
+ type: boolean
+ pullProfileListImages:
+ type: boolean
+ description: |
+ The singleuser.profileList configuration can provide a selection of
+ images. This option determines if all images identified there should
+ be pulled, both by the hook and continuous pullers.
+
+ Images are looked for under `kubespawner_override`, and also
+ `profile_options.choices.kubespawner_override` since version 3.2.0.
+
+            The reason to disable this is that if you have, for example, 10
+            images which start pulling in order from 1 to 10, a user that
+            arrives and wants to start a pod with image number 10 will need
+            to wait for all images to be pulled. In that case it may be
+            preferable to just let the arriving user wait for a single image
+            to be pulled on arrival.
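+
+            A hypothetical profileList entry whose image would be picked up
+            by the pullers when this option is enabled (the display name and
+            image reference are illustrative):
+
+            ```yaml
+            singleuser:
+              profileList:
+                - display_name: Data science environment
+                  kubespawner_override:
+                    image: registry.example.com/datascience-notebook:2024.1
+            ```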
+ extraImages:
+ type: object
+ additionalProperties: false
+ description: |
+ See the [*optimization section*](images-that-will-be-pulled) for more
+ details.
+
+ ```yaml
+ prePuller:
+ extraImages:
+ my-extra-image-i-want-pulled:
+ name: jupyter/all-spark-notebook
+ tag: 2343e33dec46
+ ```
+ patternProperties:
+ ".*":
+ type: object
+ additionalProperties: false
+ required: [name, tag]
+ properties:
+ name:
+ type: string
+ tag:
+ type: string
+ containerSecurityContext: *containerSecurityContext-spec
+ pause:
+ type: object
+ additionalProperties: false
+ description: |
+        The image-puller pods rely on initContainers to pull all images, and
+        once they are done, their actual container just runs a `pause`
+        container. These are settings for that pause container.
+ properties:
+ containerSecurityContext: *containerSecurityContext-spec
+ image: *image-spec
+
+ custom:
+ type: object
+ additionalProperties: true
+ description: |
+ Additional values to pass to the Hub.
+ JupyterHub will not itself look at these,
+ but you can read values in your own custom config via `hub.extraConfig`.
+ For example:
+
+ ```yaml
+ custom:
+ myHost: "https://example.horse"
+ hub:
+ extraConfig:
+ myConfig.py: |
+ c.MyAuthenticator.host = get_config("custom.myHost")
+ ```
+
+ cull:
+ type: object
+ additionalProperties: false
+ required: [enabled]
+ description: |
+ The
+ [jupyterhub-idle-culler](https://github.com/jupyterhub/jupyterhub-idle-culler)
+ can run as a JupyterHub managed service to _cull_ running servers.
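+
+      For example, matching this chart's values.yaml defaults, which cull
+      servers idle for an hour and check every ten minutes:
+
+      ```yaml
+      cull:
+        enabled: true
+        timeout: 3600
+        every: 600
+      ```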
+ properties:
+ enabled:
+ type: boolean
+ description: |
+          Enable/disable use of jupyterhub-idle-culler.
+ users:
+ type: [boolean, "null"]
+ description: See the `--cull-users` flag.
+ adminUsers:
+ type: [boolean, "null"]
+ description: See the `--cull-admin-users` flag.
+ removeNamedServers:
+ type: [boolean, "null"]
+ description: See the `--remove-named-servers` flag.
+ timeout:
+ type: [integer, "null"]
+ description: See the `--timeout` flag.
+ every:
+ type: [integer, "null"]
+ description: See the `--cull-every` flag.
+ concurrency:
+ type: [integer, "null"]
+ description: See the `--concurrency` flag.
+ maxAge:
+ type: [integer, "null"]
+ description: See the `--max-age` flag.
+
+ debug:
+ type: object
+ additionalProperties: false
+ required: [enabled]
+ properties:
+ enabled:
+ type: boolean
+ description: |
+ Increases the loglevel throughout the resources in the Helm chart.
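+
+        For example:
+
+        ```yaml
+        debug:
+          enabled: true
+        ```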
+
+ rbac:
+ type: object
+ additionalProperties: false
+ required: [create]
+ properties:
+ enabled:
+ type: boolean
+ # This schema entry is needed to help us print a more helpful error
+        # message in NOTES.txt if rbac.enabled is set.
+ #
+ description: |
+ ````{note}
+ Removed in version 2.0.0. If you have been using `rbac.enable=false`
+ (strongly discouraged), then the equivalent configuration would be:
+
+ ```yaml
+ rbac:
+ create: false
+ hub:
+ serviceAccount:
+ create: false
+ proxy:
+ traefik:
+ serviceAccount:
+ create: false
+ scheduling:
+ userScheduler:
+ serviceAccount:
+ create: false
+ prePuller:
+ hook:
+ serviceAccount:
+ create: false
+ ```
+ ````
+ create:
+ type: boolean
+ description: |
+ Decides if (Cluster)Role and (Cluster)RoleBinding resources are
+ created and bound to the configured serviceAccounts.
+
+ global:
+ type: object
+ additionalProperties: true
+ properties:
+ safeToShowValues:
+ type: boolean
+ description: |
+        A flag that should only be set to true temporarily when experiencing
+        a deprecation message that contains censored content that you wish
+        to reveal.
diff --git a/applications/jupyterhub/deploy/values.yaml b/applications/jupyterhub/deploy/values.yaml
index 2f5cbca3..41e108d6 100755
--- a/applications/jupyterhub/deploy/values.yaml
+++ b/applications/jupyterhub/deploy/values.yaml
@@ -1,4 +1,4 @@
-harness:
+harness: # EDIT: CLOUDHARNESS
subdomain: hub
service:
auto: false
@@ -31,6 +31,11 @@ harness:
fullnameOverride: ""
nameOverride:
+# enabled is ignored by the jupyterhub chart itself, but a chart depending on
+# the jupyterhub chart conditionally can make use of this config option as the
+# condition.
+enabled:
+
# custom can contain anything you want to pass to the hub pod, as all passed
# Helm template values will be made available there.
custom: {}
@@ -54,10 +59,11 @@ imagePullSecrets: []
# ConfigurableHTTPProxy speaks with the actual ConfigurableHTTPProxy server in
# the proxy pod.
hub:
+ revisionHistoryLimit:
config:
JupyterHub:
admin_access: true
- authenticator_class: keycloak
+ authenticator_class: keycloak # EDIT: CLOUDHARNESS
service:
type: ClusterIP
annotations: {}
@@ -68,7 +74,6 @@ hub:
baseUrl: /
cookieSecret:
initContainers: []
- fsGid: 1000
nodeSelector: {}
tolerations: []
concurrentSpawnLimit: 64
@@ -106,37 +111,38 @@ hub:
extraVolumes: []
extraVolumeMounts: []
image:
- name: jupyterhub/k8s-hub
- tag: "1.1.3"
+ name: quay.io/jupyterhub/k8s-hub
+ tag: "3.2.1"
pullPolicy:
pullSecrets: []
resources: {}
+ podSecurityContext:
+ fsGroup: 1000
containerSecurityContext:
runAsUser: 1000
runAsGroup: 1000
allowPrivilegeEscalation: false
lifecycle: {}
+ loadRoles: {}
services: {}
pdb:
enabled: false
maxUnavailable:
minAvailable: 1
networkPolicy:
- enabled: false
+ enabled: true
ingress: []
- ## egress for JupyterHub already includes Kubernetes internal DNS and
- ## access to the proxy, but can be restricted further, but ensure to allow
- ## access to the Kubernetes API server that couldn't be pinned ahead of
- ## time.
- ##
- ## ref: https://stackoverflow.com/a/59016417/2220152
- egress:
- - to:
- - ipBlock:
- cidr: 0.0.0.0/0
+ egress: []
+ egressAllowRules:
+ cloudMetadataServer: true
+ dnsPortsCloudMetadataServer: true
+ dnsPortsKubeSystemNamespace: true
+ dnsPortsPrivateIPs: true
+ nonPrivateIPs: true
+ privateIPs: true
interNamespaceAccessLabels: ignore
allowedIngressPorts: []
- allowNamedServers: true
+ allowNamedServers: true # EDIT: CLOUDHARNESS
namedServerLimitPerUser:
authenticatePrometheus:
redirectToServer:
@@ -163,11 +169,13 @@ hub:
timeoutSeconds: 1
existingSecret:
serviceAccount:
+ create: true
+ name:
annotations: {}
extraPodSpec: {}
rbac:
- enabled: true
+ create: true
# proxy relates to the proxy pod, the proxy-public service, and the autohttps
# pod and proxy-http service.
@@ -202,7 +210,7 @@ proxy:
rollingUpdate:
# service relates to the proxy-public service
service:
- type: NodePort
+ type: NodePort # EDIT: CLOUDHARNESS
labels: {}
annotations: {}
nodePorts:
@@ -215,13 +223,17 @@ proxy:
# chp relates to the proxy pod, which is responsible for routing traffic based
# on dynamic configuration sent from JupyterHub to CHP's REST API.
chp:
+ revisionHistoryLimit:
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
image:
- name: jupyterhub/configurable-http-proxy
- tag: 4.5.0 # https://github.com/jupyterhub/configurable-http-proxy/releases
+ name: quay.io/jupyterhub/configurable-http-proxy
+ # tag is automatically bumped to new patch versions by the
+ # watch-dependencies.yaml workflow.
+ #
+ tag: "4.6.1" # https://github.com/jupyterhub/configurable-http-proxy/tags
pullPolicy:
pullSecrets: []
extraCommandLineFlags: []
@@ -229,11 +241,14 @@ proxy:
enabled: true
initialDelaySeconds: 60
periodSeconds: 10
+ failureThreshold: 30
+ timeoutSeconds: 3
readinessProbe:
enabled: true
initialDelaySeconds: 0
periodSeconds: 2
failureThreshold: 1000
+ timeoutSeconds: 1
resources: {}
defaultTarget:
errorTarget:
@@ -241,12 +256,16 @@ proxy:
nodeSelector: {}
tolerations: []
networkPolicy:
- enabled: false
+ enabled: true
ingress: []
- egress:
- - to:
- - ipBlock:
- cidr: 0.0.0.0/0
+ egress: []
+ egressAllowRules:
+ cloudMetadataServer: true
+ dnsPortsCloudMetadataServer: true
+ dnsPortsKubeSystemNamespace: true
+ dnsPortsPrivateIPs: true
+ nonPrivateIPs: true
+ privateIPs: true
interNamespaceAccessLabels: ignore
allowedIngressPorts: [http, https]
pdb:
@@ -257,13 +276,17 @@ proxy:
# traefik relates to the autohttps pod, which is responsible for TLS
# termination when proxy.https.type=letsencrypt.
traefik:
+ revisionHistoryLimit:
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
image:
name: traefik
- tag: v2.4.11 # ref: https://hub.docker.com/_/traefik?tab=tags
+ # tag is automatically bumped to new patch versions by the
+ # watch-dependencies.yaml workflow.
+ #
+ tag: "v2.10.7" # ref: https://hub.docker.com/_/traefik?tab=tags
pullPolicy:
pullSecrets: []
hsts:
@@ -272,6 +295,7 @@ proxy:
maxAge: 15724800 # About 6 months
resources: {}
labels: {}
+ extraInitContainers: []
extraEnv: {}
extraVolumes: []
extraVolumeMounts: []
@@ -283,10 +307,14 @@ proxy:
networkPolicy:
enabled: true
ingress: []
- egress:
- - to:
- - ipBlock:
- cidr: 0.0.0.0/0
+ egress: []
+ egressAllowRules:
+ cloudMetadataServer: true
+ dnsPortsCloudMetadataServer: true
+ dnsPortsKubeSystemNamespace: true
+ dnsPortsPrivateIPs: true
+ nonPrivateIPs: true
+ privateIPs: true
interNamespaceAccessLabels: ignore
allowedIngressPorts: [http, https]
pdb:
@@ -294,6 +322,8 @@ proxy:
maxUnavailable:
minAvailable: 1
serviceAccount:
+ create: true
+ name:
annotations: {}
extraPodSpec: {}
secretSync:
@@ -302,8 +332,8 @@ proxy:
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
image:
- name: jupyterhub/k8s-secret-sync
- tag: "1.1.3"
+ name: quay.io/jupyterhub/k8s-secret-sync
+ tag: "3.2.1"
pullPolicy:
pullSecrets: []
resources: {}
@@ -342,29 +372,27 @@ singleuser:
preferred: []
networkTools:
image:
- name: jupyterhub/k8s-network-tools
- tag: "1.1.3"
+ name: quay.io/jupyterhub/k8s-network-tools
+ tag: "3.2.1"
pullPolicy:
pullSecrets: []
+ resources: {}
cloudMetadata:
# block set to true will append a privileged initContainer using the
# iptables to block the sensitive metadata server at the provided ip.
- blockWithIptables: false
+ blockWithIptables: true
+ ip: 169.254.169.254
networkPolicy:
- enabled: false
+ enabled: true
ingress: []
- egress:
- # Required egress to communicate with the hub and DNS servers will be
- # augmented to these egress rules.
- #
- # This default rule explicitly allows all outbound traffic from singleuser
- # pods, except to a typical IP used to return metadata that can be used by
- # someone with malicious intent.
- - to:
- - ipBlock:
- cidr: 0.0.0.0/0
- except:
- - 169.254.169.254/32
+ egress: []
+ egressAllowRules:
+ cloudMetadataServer: false
+ dnsPortsCloudMetadataServer: true
+ dnsPortsKubeSystemNamespace: true
+ dnsPortsPrivateIPs: true
+ nonPrivateIPs: true
+ privateIPs: false
interNamespaceAccessLabels: ignore
allowedIngressPorts: []
events: true
@@ -376,6 +404,7 @@ singleuser:
lifecycleHooks: {}
initContainers: []
extraContainers: []
+ allowPrivilegeEscalation: false
uid: 1000
fsGid: 100
serviceAccountName:
@@ -387,29 +416,29 @@ singleuser:
static:
pvcName:
subPath: "{username}"
- capacity: 10Mi
- homeMountPath: /home/workspace
+ capacity: 10Mi # EDIT: CLOUDHARNESS
+ homeMountPath: /home/workspace # EDIT: CLOUDHARNESS
dynamic:
storageClass:
- pvcNameTemplate: jupyter-{username}
- volumeNameTemplate: jupyter-{username}
+ pvcNameTemplate: jupyter-{username} # EDIT: CLOUDHARNESS
+ volumeNameTemplate: jupyter-{username} # EDIT: CLOUDHARNESS
storageAccessModes: [ReadWriteOnce]
image:
- name: jupyter/base-notebook
- tag: "hub-1.4.2"
+ name: quay.io/jupyterhub/k8s-singleuser-sample
+ tag: "3.2.1"
pullPolicy:
pullSecrets: []
startTimeout: 300
cpu:
- limit: 0.4
- guarantee: 0.05
+ limit: 0.4 # EDIT: CLOUDHARNESS
+ guarantee: 0.05 # EDIT: CLOUDHARNESS
memory:
- limit: 0.5G
- guarantee: 0.1G
+ limit: 0.5G # EDIT: CLOUDHARNESS
+ guarantee: 0.1G # EDIT: CLOUDHARNESS
extraResource:
limits: {}
guarantees: {}
- cmd: /usr/local/bin/start-singleuser.sh
+ cmd: jupyterhub-singleuser
defaultUrl:
extraPodConfig: {}
profileList: []
@@ -417,74 +446,146 @@ singleuser:
# scheduling relates to the user-scheduler pods and user-placeholder pods.
scheduling:
userScheduler:
- enabled: false
+ enabled: false # EDIT: CLOUDHARNESS
+ revisionHistoryLimit:
replicas: 2
logLevel: 4
+ # plugins are configured on the user-scheduler to score how user pods are
+ # scheduled, in a way that favors the most busy node. By doing this, we
+ # help scale down more effectively. It isn't obvious how to enable/disable
+ # scoring plugins, and configure them, to accomplish this.
+ #
# plugins ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins-1
+ # migration ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduler-configuration-migrations
+ #
plugins:
score:
+ # These scoring plugins are enabled by default according to
+ # https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins
+ # 2022-02-22.
+ #
+ # Enabled with high priority:
+ # - NodeAffinity
+ # - InterPodAffinity
+ # - NodeResourcesFit
+ # - ImageLocality
+ # Remains enabled with low default priority:
+ # - TaintToleration
+ # - PodTopologySpread
+ # - VolumeBinding
+ # Disabled for scoring:
+ # - NodeResourcesBalancedAllocation
+ #
disabled:
- - name: SelectorSpread
- - name: TaintToleration
- - name: PodTopologySpread
+ # We disable these plugins (with regards to scoring) to not interfere
+ # or complicate our use of NodeResourcesFit.
- name: NodeResourcesBalancedAllocation
- - name: NodeResourcesLeastAllocated
# Disable plugins to be allowed to enable them again with a different
# weight and avoid an error.
- - name: NodePreferAvoidPods
- name: NodeAffinity
- name: InterPodAffinity
+ - name: NodeResourcesFit
- name: ImageLocality
enabled:
- - name: NodePreferAvoidPods
- weight: 161051
- name: NodeAffinity
weight: 14631
- name: InterPodAffinity
weight: 1331
- - name: NodeResourcesMostAllocated
+ - name: NodeResourcesFit
weight: 121
- name: ImageLocality
weight: 11
+ pluginConfig:
+ # Here we declare that we should optimize pods to fit based on a
+ # MostAllocated strategy instead of the default LeastAllocated.
+ - name: NodeResourcesFit
+ args:
+ scoringStrategy:
+ resources:
+ - name: cpu
+ weight: 1
+ - name: memory
+ weight: 1
+ type: MostAllocated
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
image:
# IMPORTANT: Bumping the minor version of this binary should go hand in
- # hand with an inspection of the user-scheduelrs RBAC resources
- # that we have forked.
- name: k8s.gcr.io/kube-scheduler
- tag: v1.19.13 # ref: https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md
+ # hand with an inspection of the user-scheduler's RBAC
+ # resources that we have forked in
+ # templates/scheduling/user-scheduler/rbac.yaml.
+ #
+ # Debugging advice:
+ #
+ # - Is configuration of kube-scheduler broken in
+ # templates/scheduling/user-scheduler/configmap.yaml?
+ #
+ # - Is the kube-scheduler binary compatible with the k8s
+ # api-server, or is the api-server too new or too old?
+ #
+ # - You can update the GitHub workflow that runs tests to
+ # include "deploy/user-scheduler" in the k8s namespace report
+ # and reduce the user-scheduler deployments replicas to 1 in
+ # dev-config.yaml to get relevant logs from the user-scheduler
+ # pods. Inspect the "Kubernetes namespace report" action!
+ #
+ # - Typical failures are that kube-scheduler fails to search for
+ # resources via its "informers", and won't start trying to
+ # schedule pods before they succeed. This may require additional
+ # RBAC permissions, or require that the k8s api-server is aware
+ # of the resources.
+ #
+ # - If "successfully acquired lease" can be seen in the logs, it
+ # is a good sign kube-scheduler is ready to schedule pods.
+ #
+ name: registry.k8s.io/kube-scheduler
+ # tag is automatically bumped to new patch versions by the
+ # watch-dependencies.yaml workflow. The minor version is pinned in the
+ # workflow, and should be updated there if a minor version bump is done
+ # here. We aim to stay around 1 minor version behind the latest k8s
+ # version.
+ #
+ tag: "v1.28.6" # ref: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
pullPolicy:
pullSecrets: []
nodeSelector: {}
tolerations: []
+ labels: {}
+ annotations: {}
pdb:
enabled: true
maxUnavailable: 1
minAvailable:
resources: {}
serviceAccount:
+ create: true
+ name:
annotations: {}
extraPodSpec: {}
podPriority:
enabled: false
globalDefault: false
defaultPriority: 0
+ imagePullerPriority: -5
userPlaceholderPriority: -10
userPlaceholder:
enabled: true
image:
- name: k8s.gcr.io/pause
- # tag's can be updated by inspecting the output of the command:
- # gcloud container images list-tags k8s.gcr.io/pause --sort-by=~tags
+ name: registry.k8s.io/pause
+ # tag is automatically bumped to new patch versions by the
+ # watch-dependencies.yaml workflow.
#
# If you update this, also update prePuller.pause.image.tag
- tag: "3.5"
+ #
+ tag: "3.9"
pullPolicy:
pullSecrets: []
+ revisionHistoryLimit:
replicas: 0
+ labels: {}
+ annotations: {}
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
@@ -517,6 +618,8 @@ scheduling:
# prePuller relates to the hook|continuous-image-puller DaemonsSets
prePuller:
+ revisionHistoryLimit:
+ labels: {}
annotations: {}
resources: {}
containerSecurityContext:
@@ -530,8 +633,8 @@ prePuller:
pullOnlyOnChanges: true
# image and the configuration below relates to the hook-image-awaiter Job
image:
- name: jupyterhub/k8s-image-awaiter
- tag: "1.1.3"
+ name: quay.io/jupyterhub/k8s-image-awaiter
+ tag: "3.2.1"
pullPolicy:
pullSecrets: []
containerSecurityContext:
@@ -543,6 +646,8 @@ prePuller:
tolerations: []
resources: {}
serviceAccount:
+ create: true
+ name:
annotations: {}
continuous:
enabled: true
@@ -554,18 +659,20 @@ prePuller:
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
image:
- name: k8s.gcr.io/pause
- # tag's can be updated by inspecting the output of the command:
- # gcloud container images list-tags k8s.gcr.io/pause --sort-by=~tags
+ name: registry.k8s.io/pause
+ # tag is automatically bumped to new patch versions by the
+ # watch-dependencies.yaml workflow.
#
# If you update this, also update scheduling.userPlaceholder.image.tag
- tag: "3.5"
+ #
+ tag: "3.9"
pullPolicy:
pullSecrets: []
ingress:
enabled: false
annotations: {}
+ ingressClassName:
hosts: []
pathSuffix:
pathType: Prefix
@@ -581,7 +688,8 @@ ingress:
cull:
enabled: true
users: false # --cull-users
- removeNamedServers: true # --remove-named-servers
+ adminUsers: true # --cull-admin-users
+ removeNamedServers: true # EDIT: CLOUDHARNESS
timeout: 3600 # --timeout
every: 600 # --cull-every
concurrency: 10 # --concurrency
diff --git a/applications/jupyterhub/zero-to-jupyterhub-k8s b/applications/jupyterhub/zero-to-jupyterhub-k8s
new file mode 160000
index 00000000..c92c1237
--- /dev/null
+++ b/applications/jupyterhub/zero-to-jupyterhub-k8s
@@ -0,0 +1 @@
+Subproject commit c92c12374795e84f36f5f16c4e8b8a448ad2f230
From cff3c6b7d85b0d6c3f99f3d84222eb6062bd3d85 Mon Sep 17 00:00:00 2001
From: Filippo Ledda
Date: Sat, 20 Jan 2024 11:31:34 +0100
Subject: [PATCH 002/210] CH-110 jupyterhub update wip
---
applications/jupyterhub/README.md | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/applications/jupyterhub/README.md b/applications/jupyterhub/README.md
index d961d034..d7d67d4d 100755
--- a/applications/jupyterhub/README.md
+++ b/applications/jupyterhub/README.md
@@ -31,3 +31,13 @@ To support the pre pulling of task images see (https://github.com/MetaCell/cloud
the template `templates/image-puller/_helpers-daemonset.tpl` has been changed (see line 167 and on)
TODO: remember to implement/revise this code after you have updated/changed the templates of JupyterHub
+
+## How to update
+
+The helm chart is based on the [zero-to-jupyterhub](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/) helm chart.
+
+1. Run update.sh [TAG] # Do not use latest!
+2. Restore from the diff files with EDIT: CLOUDHARNESS
+
+Customize notebook image: quay.io/jupyterhub/k8s-singleuser-sample:[TAG]
+
From 428d83d75b91cb956c6cf758cc87cb5ab4a25efa Mon Sep 17 00:00:00 2001
From: Filippo Ledda
Date: Sat, 20 Jan 2024 11:31:46 +0100
Subject: [PATCH 003/210] CH-110 jupyterhub update wip
---
applications/jupyterhub/Dockerfile | 26 +-
applications/jupyterhub/README.md | 3 +-
.../deploy/resources/hub/jupyterhub_config.py | 1 +
.../jupyterhub/deploy/resources/hub/z2jh.py | 1 +
.../deploy/templates/_helpers-auth-rework.tpl | 4 +-
.../jupyterhub/deploy/templates/_helpers.tpl | 2 +-
applications/jupyterhub/update.patch | 5845 +++++++++++++++++
applications/jupyterhub/update.sh | 28 +
deployment/codefresh-test-local.yaml | 439 +-
9 files changed, 6050 insertions(+), 299 deletions(-)
create mode 100644 applications/jupyterhub/update.patch
create mode 100644 applications/jupyterhub/update.sh
diff --git a/applications/jupyterhub/Dockerfile b/applications/jupyterhub/Dockerfile
index 8b279adc..907ce672 100755
--- a/applications/jupyterhub/Dockerfile
+++ b/applications/jupyterhub/Dockerfile
@@ -1,31 +1,39 @@
ARG CLOUDHARNESS_BASE
FROM $CLOUDHARNESS_BASE as base
-FROM jupyterhub/k8s-hub:1.1.3
+FROM quay.io/jupyterhub/k8s-hub:3.2.1
USER root
COPY --from=base libraries/models/requirements.txt /libraries/models/requirements.txt
-RUN pip install -r /libraries/models/requirements.txt
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install -r /libraries/models/requirements.txt
COPY --from=base libraries/cloudharness-common/requirements.txt /libraries/cloudharness-common/requirements.txt
-RUN pip install -r /libraries/cloudharness-common/requirements.txt
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install -r /libraries/cloudharness-common/requirements.txt
COPY --from=base libraries/client/cloudharness_cli/requirements.txt /libraries/client/cloudharness_cli/requirements.txt
-RUN pip install -r /libraries/client/cloudharness_cli/requirements.txt
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install -r /libraries/client/cloudharness_cli/requirements.txt
COPY --from=base libraries/models /libraries/models
-RUN pip install -e /libraries/models
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install -e /libraries/models
COPY --from=base libraries/cloudharness-common /libraries/cloudharness-common
COPY --from=base libraries/client/cloudharness_cli /libraries/client/cloudharness_cli
#
-RUN pip install -e /libraries/cloudharness-common
-RUN pip install -e /libraries/client/cloudharness_cli
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install -e /libraries/cloudharness-common
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install -e /libraries/client/cloudharness_cli
COPY src src
-RUN pip install ./src/harness_jupyter
-RUN pip install ./src/chauthenticator
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install ./src/harness_jupyter
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install ./src/chauthenticator
USER jovyan
diff --git a/applications/jupyterhub/README.md b/applications/jupyterhub/README.md
index d7d67d4d..9ad78d2f 100755
--- a/applications/jupyterhub/README.md
+++ b/applications/jupyterhub/README.md
@@ -37,7 +37,8 @@ TODO: remember to implement/revise this code after you have updated/changed the
The helm chart is based on the [zero-to-jupyterhub](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/) helm chart.
1. Run update.sh [TAG] # Do not use latest!
-2. Restore from the diff files with EDIT: CLOUDHARNESS
+2. Restore from the diff files with EDIT: CLOUDHARNESS. Use update.patch as a reference
+3. Update Dockerfile to use the same base image you see on values.yaml: hub/image
Customize notebook image: quay.io/jupyterhub/k8s-singleuser-sample:[TAG]
diff --git a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
index 8ec801ee..5ebe20b5 100755
--- a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
+++ b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
@@ -537,6 +537,7 @@ def camelCaseify(s):
c.Authenticator.auto_login = True
c.OAuthenticator.client_id = client_id
c.OAuthenticator.client_secret = client_secret
+ c.OAuthenticator.allow_all = True
c.GenericOAuthenticator.login_service = "CH"
c.GenericOAuthenticator.username_key = "email"
diff --git a/applications/jupyterhub/deploy/resources/hub/z2jh.py b/applications/jupyterhub/deploy/resources/hub/z2jh.py
index fc368f64..2fe0d25b 100755
--- a/applications/jupyterhub/deploy/resources/hub/z2jh.py
+++ b/applications/jupyterhub/deploy/resources/hub/z2jh.py
@@ -119,6 +119,7 @@ def get_config(key, default=None):
value = value[level]
# EDIT: CLOUDHARNESS START
+ import re
if value and isinstance(value, str):
replace_var = re.search("{{.*?}}", value)
if replace_var:
diff --git a/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl b/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
index 3159d103..e9d2b4f4 100644
--- a/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
+++ b/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
@@ -178,7 +178,7 @@ ldap.dn.user.useLookupName: LDAPAuthenticator.use_lookup_dn_username
representing the old z2jh config, output the result
in $c.
*/}}
- {{- include "jupyterhub.authDep.remapOldToNew.mappable" (list $c .Values.apps.jupyterhub.apps.jupyterhub.global.safeToSho.Values.apps.jupyterhub. }}
+ {{- include "jupyterhub.authDep.remapOldToNew.mappable" (list $c .Values.apps.jupyterhub.apps.jupyterhub.global.safeToSho.Values.apps.jupyterhub) }}
{{- $class_old_config_key := .Values.apps.jupyterhub.apps.jupyterhub.auth.type | default "" }} {{- /* ldap - github */}}
{{- $class_new_entrypoint := "" }} {{- /* ldapauthenticator.LDAPAuthenticator - github */}}
@@ -191,7 +191,7 @@ ldap.dn.user.useLookupName: LDAPAuthenticator.use_lookup_dn_username
{{- /* UPDATE c dict explicitly with auth.custom.config */}}
{{- if .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.config }}
{{- $custom_config := merge (dict) .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.config }}
- {{- if not .Values.apps.jupyterhub.apps.jupyterhub.global.safeToSho.Values.apps.jupyterhub.}}
+ {{- if not .Values.apps.jupyterhub.apps.jupyterhub.global.safeToSho.Values.apps.jupyterhub }}
{{- range $key, $val := $custom_config }}
{{- $_ := set $custom_config $key "***" }}
{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/_helpers.tpl b/applications/jupyterhub/deploy/templates/_helpers.tpl
index a2023639..1737f3d6 100755
--- a/applications/jupyterhub/deploy/templates/_helpers.tpl
+++ b/applications/jupyterhub/deploy/templates/_helpers.tpl
@@ -194,7 +194,7 @@ component: {{ include "jupyterhub.componentLabel" . }}
using "toYaml | fromYaml" in order to be able to use normal helm
template functions on it.
*/}}
- {{- $jupyterhub_values := .root.Values.apps.jupyterhub.}}
+ {{- $jupyterhub_values := .root.Values.apps.jupyterhub }}
{{- if ne .root.Chart.Name "jupyterhub" }}
{{- if .root.Values.apps.jupyterhub.jupyterhub }}
{{- $jupyterhub_values = .root.Values.apps.jupyterhub.jupyterhub }}
diff --git a/applications/jupyterhub/update.patch b/applications/jupyterhub/update.patch
new file mode 100644
index 00000000..5241525b
--- /dev/null
+++ b/applications/jupyterhub/update.patch
@@ -0,0 +1,5845 @@
+diff --git a/applications/jupyterhub/README.md b/applications/jupyterhub/README.md
+index d961d03..d7d67d4 100755
+--- a/applications/jupyterhub/README.md
++++ b/applications/jupyterhub/README.md
+@@ -31,3 +31,13 @@ To support the pre pulling of task images see (https://github.com/MetaCell/cloud
+ the template `templates/image-puller/_helpers-daemonset.tpl` has been changed (see line 167 and on)
+
+ TODO: remember to implement/revise this code after you have updated/changed the templates of JupyterHub
++
++## How to update
++
++The helm chart is based on the [zero-to-jupyterhub](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/) helm chart.
++
++1. Run update.sh [TAG] # Do not use latest!
++2. Restore from the diff files with EDIT: CLOUDHARNESS
++
++Customize notebook image: quay.io/jupyterhub/k8s-singleuser-sample:[TAG]
++
+diff --git a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
+index d4b3cee..8ec801e 100755
+--- a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
++++ b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
+@@ -1,9 +1,17 @@
++# load the config object (satisfies linters)
++c = get_config() # noqa
++
++import glob
+ import os
+ import re
+ import sys
+-import logging
+
++from jupyterhub.utils import url_path_join
++from kubernetes_asyncio import client
+ from tornado.httpclient import AsyncHTTPClient
++
++#CLOUDHARNESS: EDIT START
++import logging
+ from kubernetes import client
+ from jupyterhub.utils import url_path_join
+
+@@ -12,7 +20,7 @@ try:
+ harness_hub() # activates harness hooks on jupyterhub
+ except Exception as e:
+ logging.error("could not import harness_jupyter", exc_info=True)
+-
++# CLOUDHARNESS: EDIT END
+
+ # Make sure that modules placed in the same directory as the jupyterhub config are added to the pythonpath
+ configuration_directory = os.path.dirname(os.path.realpath(__file__))
+@@ -20,39 +28,13 @@ sys.path.insert(0, configuration_directory)
+
+ from z2jh import (
+ get_config,
+- set_config_if_not_none,
+ get_name,
+ get_name_env,
+ get_secret_value,
++ set_config_if_not_none,
+ )
+
+
+-print('Base url is', c.JupyterHub.get('base_url', '/'))
+-
+-# Configure JupyterHub to use the curl backend for making HTTP requests,
+-# rather than the pure-python implementations. The default one starts
+-# being too slow to make a large number of requests to the proxy API
+-# at the rate required.
+-AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
+-
+-c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'
+-
+-# Connect to a proxy running in a different pod
+-c.ConfigurableHTTPProxy.api_url = 'http://{}:{}'.format(os.environ['PROXY_API_SERVICE_HOST'], int(os.environ['PROXY_API_SERVICE_PORT']))
+-c.ConfigurableHTTPProxy.should_start = False
+-
+-# Do not shut down user pods when hub is restarted
+-c.JupyterHub.cleanup_servers = False
+-
+-# Check that the proxy has routes appropriately setup
+-c.JupyterHub.last_activity_interval = 60
+-
+-# Don't wait at all before redirecting a spawning user to the progress page
+-c.JupyterHub.tornado_settings = {
+- 'slow_spawn_timeout': 0,
+-}
+-
+-
+ def camelCaseify(s):
+ """convert snake_case to camelCase
+
+@@ -173,6 +155,7 @@ for trait, cfg_key in (
+ ("events_enabled", "events"),
+ ("extra_labels", None),
+ ("extra_annotations", None),
++ # ("allow_privilege_escalation", None), # Managed manually below
+ ("uid", None),
+ ("fs_gid", None),
+ ("service_account", "serviceAccountName"),
+@@ -206,10 +189,19 @@ image = get_config("singleuser.image.name")
+ if image:
+ tag = get_config("singleuser.image.tag")
+ if tag:
+- image = "{}:{}".format(image, tag)
++ image = f"{image}:{tag}"
+
+ c.KubeSpawner.image = image
+
++# allow_privilege_escalation defaults to False in KubeSpawner 2+. Since its a
++# property where None, False, and True all are valid values that users of the
++# Helm chart may want to set, we can't use the set_config_if_not_none helper
++# function as someone may want to override the default False value to None.
++#
++c.KubeSpawner.allow_privilege_escalation = get_config(
++ "singleuser.allowPrivilegeEscalation"
++)
++
+ # Combine imagePullSecret.create (single), imagePullSecrets (list), and
+ # singleuser.image.pullSecrets (list).
+ image_pull_secrets = []
+@@ -255,7 +247,7 @@ if match_node_purpose:
+ pass
+ else:
+ raise ValueError(
+- "Unrecognized value for matchNodePurpose: %r" % match_node_purpose
++ f"Unrecognized value for matchNodePurpose: {match_node_purpose}"
+ )
+
+ # Combine the common tolerations for user pods with singleuser tolerations
+@@ -271,7 +263,7 @@ if storage_type == "dynamic":
+ pvc_name_template = get_config("singleuser.storage.dynamic.pvcNameTemplate")
+ c.KubeSpawner.pvc_name_template = pvc_name_template
+ volume_name_template = get_config("singleuser.storage.dynamic.volumeNameTemplate")
+- c.KubeSpawner.storage_pvc_ensure = False
++ c.KubeSpawner.storage_pvc_ensure = True
+ set_config_if_not_none(
+ c.KubeSpawner, "storage_class", "singleuser.storage.dynamic.storageClass"
+ )
+@@ -354,41 +346,62 @@ c.KubeSpawner.volume_mounts.extend(
+ )
+
+ c.JupyterHub.services = []
++c.JupyterHub.load_roles = []
+
++# jupyterhub-idle-culler's permissions are scoped to what it needs only, see
++# https://github.com/jupyterhub/jupyterhub-idle-culler#permissions.
++#
+ if get_config("cull.enabled", False):
++ jupyterhub_idle_culler_role = {
++ "name": "jupyterhub-idle-culler",
++ "scopes": [
++ "list:users",
++ "read:users:activity",
++ "read:servers",
++ "delete:servers",
++ # "admin:users", # dynamically added if --cull-users is passed
++ ],
++ # assign the role to a jupyterhub service, so it gains these permissions
++ "services": ["jupyterhub-idle-culler"],
++ }
++
+ cull_cmd = ["python3", "-m", "jupyterhub_idle_culler"]
+ base_url = c.JupyterHub.get("base_url", "/")
+ cull_cmd.append("--url=http://localhost:8081" + url_path_join(base_url, "hub/api"))
+
+ cull_timeout = get_config("cull.timeout")
+ if cull_timeout:
+- cull_cmd.append("--timeout=%s" % cull_timeout)
++ cull_cmd.append(f"--timeout={cull_timeout}")
+
+ cull_every = get_config("cull.every")
+ if cull_every:
+- cull_cmd.append("--cull-every=%s" % cull_every)
++ cull_cmd.append(f"--cull-every={cull_every}")
+
+ cull_concurrency = get_config("cull.concurrency")
+ if cull_concurrency:
+- cull_cmd.append("--concurrency=%s" % cull_concurrency)
++ cull_cmd.append(f"--concurrency={cull_concurrency}")
+
+ if get_config("cull.users"):
+ cull_cmd.append("--cull-users")
++ jupyterhub_idle_culler_role["scopes"].append("admin:users")
++
++ if not get_config("cull.adminUsers"):
++ cull_cmd.append("--cull-admin-users=false")
+
+ if get_config("cull.removeNamedServers"):
+ cull_cmd.append("--remove-named-servers")
+
+ cull_max_age = get_config("cull.maxAge")
+ if cull_max_age:
+- cull_cmd.append("--max-age=%s" % cull_max_age)
++ cull_cmd.append(f"--max-age={cull_max_age}")
+
+ c.JupyterHub.services.append(
+ {
+- "name": "cull-idle",
+- "admin": True,
++ "name": "jupyterhub-idle-culler",
+ "command": cull_cmd,
+ }
+ )
++ c.JupyterHub.load_roles.append(jupyterhub_idle_culler_role)
+
+ for key, service in get_config("hub.services", {}).items():
+ # c.JupyterHub.services is a list of dicts, but
+@@ -402,26 +415,44 @@ for key, service in get_config("hub.services", {}).items():
+
+ c.JupyterHub.services.append(service)
+
++for key, role in get_config("hub.loadRoles", {}).items():
++ # c.JupyterHub.load_roles is a list of dicts, but
++ # hub.loadRoles is a dict of dicts to make the config mergable
++ role.setdefault("name", key)
++
++ c.JupyterHub.load_roles.append(role)
++
++# respect explicit null command (distinct from unspecified)
++# this avoids relying on KubeSpawner.cmd's default being None
++_unspecified = object()
++specified_cmd = get_config("singleuser.cmd", _unspecified)
++if specified_cmd is not _unspecified:
++ c.Spawner.cmd = specified_cmd
+
+-set_config_if_not_none(c.Spawner, "cmd", "singleuser.cmd")
+ set_config_if_not_none(c.Spawner, "default_url", "singleuser.defaultUrl")
+
+-cloud_metadata = get_config("singleuser.cloudMetadata", {})
++cloud_metadata = get_config("singleuser.cloudMetadata")
+
+ if cloud_metadata.get("blockWithIptables") == True:
+ # Use iptables to block access to cloud metadata by default
+ network_tools_image_name = get_config("singleuser.networkTools.image.name")
+ network_tools_image_tag = get_config("singleuser.networkTools.image.tag")
++ network_tools_resources = get_config("singleuser.networkTools.resources")
++ ip = cloud_metadata["ip"]
+ ip_block_container = client.V1Container(
+ name="block-cloud-metadata",
+ image=f"{network_tools_image_name}:{network_tools_image_tag}",
+ command=[
+ "iptables",
+- "-A",
++ "--append",
+ "OUTPUT",
+- "-d",
+- cloud_metadata.get("ip", "169.254.169.254"),
+- "-j",
++ "--protocol",
++ "tcp",
++ "--destination",
++ ip,
++ "--destination-port",
++ "80",
++ "--jump",
+ "DROP",
+ ],
+ security_context=client.V1SecurityContext(
+@@ -429,6 +460,7 @@ if cloud_metadata.get("blockWithIptables") == True:
+ run_as_user=0,
+ capabilities=client.V1Capabilities(add=["NET_ADMIN"]),
+ ),
++ resources=network_tools_resources,
+ )
+
+ c.KubeSpawner.init_containers.append(ip_block_container)
+@@ -438,17 +470,6 @@ if get_config("debug.enabled", False):
+ c.JupyterHub.log_level = "DEBUG"
+ c.Spawner.debug = True
+
+-# load /usr/local/etc/jupyterhub/jupyterhub_config.d config files
+-config_dir = "/usr/local/etc/jupyterhub/jupyterhub_config.d"
+-if os.path.isdir(config_dir):
+- for file_path in sorted(glob.glob(f"{config_dir}/*.py")):
+- file_name = os.path.basename(file_path)
+- print(f"Loading {config_dir} config: {file_name}")
+- with open(file_path) as f:
+- file_content = f.read()
+- # compiling makes debugging easier: https://stackoverflow.com/a/437857
+- exec(compile(source=file_content, filename=file_name, mode="exec"))
+-
+ # load potentially seeded secrets
+ #
+ # NOTE: ConfigurableHTTPProxy.auth_token is set through an environment variable
+@@ -471,11 +492,23 @@ for app, cfg in get_config("hub.config", {}).items():
+ cfg.pop("keys", None)
+ c[app].update(cfg)
+
++# load /usr/local/etc/jupyterhub/jupyterhub_config.d config files
++config_dir = "/usr/local/etc/jupyterhub/jupyterhub_config.d"
++if os.path.isdir(config_dir):
++ for file_path in sorted(glob.glob(f"{config_dir}/*.py")):
++ file_name = os.path.basename(file_path)
++ print(f"Loading {config_dir} config: {file_name}")
++ with open(file_path) as f:
++ file_content = f.read()
++ # compiling makes debugging easier: https://stackoverflow.com/a/437857
++ exec(compile(source=file_content, filename=file_name, mode="exec"))
++
+ # execute hub.extraConfig entries
+ for key, config_py in sorted(get_config("hub.extraConfig", {}).items()):
+- print("Loading extra config: %s" % key)
++ print(f"Loading extra config: {key}")
+ exec(config_py)
+
++# CLOUDHARNESS: EDIT START
+ # Allow switching authenticators easily
+ auth_type = get_config('hub.config.JupyterHub.authenticator_class')
+ email_domain = 'local'
+@@ -525,4 +558,5 @@ set_config_if_not_none(c.Authenticator, 'whitelist', 'auth.whitelist.users')
+ c.apps = get_config('apps')
+ c.registry = get_config('registry')
+ c.domain = get_config('root.domain')
+-c.namespace = get_config('root.namespace')
+\ No newline at end of file
++c.namespace = get_config('root.namespace')
++# CLOUDHARNESS: EDIT END
+\ No newline at end of file
+diff --git a/applications/jupyterhub/deploy/resources/hub/z2jh.py b/applications/jupyterhub/deploy/resources/hub/z2jh.py
+index 834a6b6..fc368f6 100755
+--- a/applications/jupyterhub/deploy/resources/hub/z2jh.py
++++ b/applications/jupyterhub/deploy/resources/hub/z2jh.py
+@@ -3,15 +3,15 @@ Utility methods for use in jupyterhub_config.py and dynamic subconfigs.
+
+ Methods here can be imported by extraConfig in values.yaml
+ """
+-from collections import Mapping
+-from functools import lru_cache
+ import os
+-import re
++from collections.abc import Mapping
++from functools import lru_cache
+
+ import yaml
+
++
+ # memoize so we only load config once
+-@lru_cache()
++@lru_cache
+ def _load_config():
+ """Load the Helm chart configuration used to render the Helm templates of
+ the chart from a mounted k8s Secret, and merge in values from an optionally
+@@ -27,6 +27,7 @@ def _load_config():
+ cfg = _merge_dictionaries(cfg, values)
+ else:
+ print(f"No config at {path}")
++ # EDIT: CLOUDHARNESS START
+ path = f"/opt/cloudharness/resources/allvalues.yaml"
+ if os.path.exists(path):
+ print("Loading global CloudHarness config at", path)
+@@ -34,11 +35,11 @@ def _load_config():
+ values = yaml.safe_load(f)
+ cfg = _merge_dictionaries(cfg, values)
+ cfg['root'] = values
+-
++ # EDIT: CLOUDHARNESS END
+ return cfg
+
+
+-@lru_cache()
++@lru_cache
+ def _get_config_value(key):
+ """Load value from the k8s ConfigMap given a key."""
+
+@@ -50,7 +51,7 @@ def _get_config_value(key):
+ raise Exception(f"{path} not found!")
+
+
+-@lru_cache()
++@lru_cache
+ def get_secret_value(key, default="never-explicitly-set"):
+ """Load value from the user managed k8s Secret or the default k8s Secret
+ given a key."""
+@@ -117,7 +118,7 @@ def get_config(key, default=None):
+ else:
+ value = value[level]
+
+-
++ # EDIT: CLOUDHARNESS START
+ if value and isinstance(value, str):
+ replace_var = re.search("{{.*?}}", value)
+ if replace_var:
+@@ -128,6 +129,7 @@ def get_config(key, default=None):
+ if repl:
+ print("replace", variable, "in", value, ":", repl)
+ value = re.sub("{{.*?}}", repl, value)
++ # EDIT: CLOUDHARNESS END
+ return value
+
+
+@@ -137,6 +139,5 @@ def set_config_if_not_none(cparent, name, key):
+ configuration item if not None
+ """
+ data = get_config(key)
+-
+ if data is not None:
+- setattr(cparent, name, data)
+\ No newline at end of file
++ setattr(cparent, name, data)
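The CLOUDHARNESS block in `get_config` above substitutes a `{{ ... }}` placeholder found inside string values. A rough standalone sketch of that substitution logic in Python (simplified; in the real helper the variable is resolved against the merged chart config rather than a plain dict):

    import re

    def substitute_placeholders(value, resolve):
        """Replace a {{ var }} placeholder in value using resolve(var)."""
        if value and isinstance(value, str):
            replace_var = re.search(r"{{.*?}}", value)
            if replace_var:
                variable = replace_var.group()[2:-2].strip()
                repl = resolve(variable)
                if repl:
                    value = re.sub(r"{{.*?}}", repl, value)
        return value

    # Example: resolve from a flat dict instead of the chart config.
    values = {"domain": "example.local"}
    print(substitute_placeholders("hub.{{ domain }}", values.get))  # hub.example.local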
+diff --git a/applications/jupyterhub/deploy/templates/NOTES.txt b/applications/jupyterhub/deploy/templates/NOTES.txt
+new file mode 100644
+index 0000000..9769a9c
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/NOTES.txt
+@@ -0,0 +1,158 @@
++{{- $proxy_service := include "jupyterhub.proxy-public.fullname" . -}}
++
++{{- /* Generated with https://patorjk.com/software/taag/#p=display&h=0&f=Slant&t=JupyterHub */}}
++. __ __ __ __ __
++ / / __ __ ____ __ __ / /_ ___ _____ / / / / __ __ / /_
++ __ / / / / / / / __ \ / / / / / __/ / _ \ / ___/ / /_/ / / / / / / __ \
++/ /_/ / / /_/ / / /_/ / / /_/ / / /_ / __/ / / / __ / / /_/ / / /_/ /
++\____/ \__,_/ / .___/ \__, / \__/ \___/ /_/ /_/ /_/ \__,_/ /_.___/
++ /_/ /____/
++
++ You have successfully installed the official JupyterHub Helm chart!
++
++### Installation info
++
++ - Kubernetes namespace: {{ .Release.Namespace }}
++ - Helm release name: {{ .Release.Name }}
++ - Helm chart version: {{ .Chart.Version }}
++ - JupyterHub version: {{ .Chart.AppVersion }}
++ - Hub pod packages: See https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/{{ include "jupyterhub.chart-version-to-git-ref" .Chart.Version }}/images/hub/requirements.txt
++
++### Followup links
++
++ - Documentation: https://z2jh.jupyter.org
++ - Help forum: https://discourse.jupyter.org
++ - Social chat: https://gitter.im/jupyterhub/jupyterhub
++ - Issue tracking: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues
++
++### Post-installation checklist
++
++ - Verify that created Pods enter a Running state:
++
++ kubectl --namespace={{ .Release.Namespace }} get pod
++
++ If a pod is stuck with a Pending or ContainerCreating status, diagnose with:
++
++ kubectl --namespace={{ .Release.Namespace }} describe pod
++
++ If a pod keeps restarting, diagnose with:
++
++ kubectl --namespace={{ .Release.Namespace }} logs --previous
++ {{- println }}
++
++ {{- if eq .Values.apps.jupyterhub.proxy.service.type "LoadBalancer" }}
++ - Verify an external IP is provided for the k8s Service {{ $proxy_service }}.
++
++ kubectl --namespace={{ .Release.Namespace }} get service {{ $proxy_service }}
++
++ If the external ip remains <pending>, diagnose with:
++
++ kubectl --namespace={{ .Release.Namespace }} describe service {{ $proxy_service }}
++ {{- end }}
++
++ - Verify web based access:
++ {{- println }}
++ {{- if .Values.apps.jupyterhub.ingress.enabled }}
++ {{- range $host := .Values.apps.jupyterhub.ingress.hosts }}
++ Try insecure HTTP access: http://{{ $host }}{{ $.Values.apps.jupyterhub.hub.baseUrl | trimSuffix "/" }}/
++ {{- end }}
++
++ {{- range $tls := .Values.apps.jupyterhub.ingress.tls }}
++ {{- range $host := $tls.hosts }}
++ Try secure HTTPS access: https://{{ $host }}{{ $.Values.apps.jupyterhub.hub.baseUrl | trimSuffix "/" }}/
++ {{- end }}
++ {{- end }}
++ {{- else }}
++ You have not configured a k8s Ingress resource so you need to access the k8s
++ Service {{ $proxy_service }} directly.
++ {{- println }}
++
++ {{- if eq .Values.apps.jupyterhub.proxy.service.type "NodePort" }}
++ The k8s Service {{ $proxy_service }} is exposed via NodePorts. That means
++ that all the k8s cluster's nodes are exposing the k8s Service via those
++ ports.
++
++ Try insecure HTTP access: http://<node-ip>:{{ .Values.apps.jupyterhub.proxy.service.nodePorts.http | default "no-http-nodeport-set"}}
++ Try secure HTTPS access: https://<node-ip>:{{ .Values.apps.jupyterhub.proxy.service.nodePorts.https | default "no-https-nodeport-set" }}
++
++ {{- else }}
++ If your computer is outside the k8s cluster, you can port-forward traffic to
++ the k8s Service {{ $proxy_service }} with kubectl to access it from your
++ computer.
++
++ kubectl --namespace={{ .Release.Namespace }} port-forward service/{{ $proxy_service }} 8080:http
++
++ Try insecure HTTP access: http://localhost:8080
++ {{- end }}
++ {{- end }}
++ {{- println }}
++
++
++
++
++
++{{- /*
++ Warnings for likely misconfigurations
++*/}}
++
++{{- if and (not .Values.apps.jupyterhub.scheduling.podPriority.enabled) (and .Values.apps.jupyterhub.scheduling.userPlaceholder.enabled .Values.apps.jupyterhub.scheduling.userPlaceholder.replicas) }}
++#################################################################################
++###### WARNING: You are using user placeholders without pod priority #####
++###### enabled*, either enable pod priority or stop using the #####
++###### user placeholders** to avoid having placeholders that #####
++###### refuse to make room for a real user. #####
++###### #####
++###### *scheduling.podPriority.enabled #####
++###### **scheduling.userPlaceholder.enabled #####
++###### **scheduling.userPlaceholder.replicas #####
++#################################################################################
++{{- println }}
++{{- end }}
++
++
++
++
++
++{{- /*
++ Breaking changes and failures for likely misconfigurations.
++*/}}
++
++{{- $breaking := "" }}
++{{- $breaking_title := "\n" }}
++{{- $breaking_title = print $breaking_title "\n#################################################################################" }}
++{{- $breaking_title = print $breaking_title "\n###### BREAKING: The config values passed contained no longer accepted #####" }}
++{{- $breaking_title = print $breaking_title "\n###### options. See the messages below for more details. #####" }}
++{{- $breaking_title = print $breaking_title "\n###### #####" }}
++{{- $breaking_title = print $breaking_title "\n###### To verify your updated config is accepted, you can use #####" }}
++{{- $breaking_title = print $breaking_title "\n###### the `helm template` command. #####" }}
++{{- $breaking_title = print $breaking_title "\n#################################################################################" }}
++
++
++{{- /*
++ This is an example (in a helm template comment) on how to detect and
++ communicate with regards to a breaking chart config change.
++
++ {{- if hasKey .Values.apps.jupyterhub.singleuser.cloudMetadata "enabled" }}
++ {{- $breaking = print $breaking "\n\nCHANGED: singleuser.cloudMetadata.enabled must as of 1.0.0 be configured using singleuser.cloudMetadata.blockWithIptables with the opposite value." }}
++ {{- end }}
++*/}}
++
++
++{{- if hasKey .Values.apps.jupyterhub.rbac "enabled" }}
++{{- $breaking = print $breaking "\n\nCHANGED: rbac.enabled must as of version 2.0.0 be configured via rbac.create and .serviceAccount.create." }}
++{{- end }}
++
++
++{{- if hasKey .Values.apps.jupyterhub.hub "fsGid" }}
++{{- $breaking = print $breaking "\n\nCHANGED: hub.fsGid must as of version 2.0.0 be configured via hub.podSecurityContext.fsGroup." }}
++{{- end }}
++
++
++{{- if and .Values.apps.jupyterhub.singleuser.cloudMetadata.blockWithIptables (and .Values.apps.jupyterhub.singleuser.networkPolicy.enabled .Values.apps.jupyterhub.singleuser.networkPolicy.egressAllowRules.cloudMetadataServer) }}
++{{- $breaking = print $breaking "\n\nCHANGED: singleuser.cloudMetadata.blockWithIptables must as of version 3.0.0 not be configured together with singleuser.networkPolicy.egressAllowRules.cloudMetadataServer as it leads to an ambiguous configuration." }}
++{{- end }}
++
++
++{{- if $breaking }}
++{{- fail (print $breaking_title $breaking "\n\n") }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl b/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
+index b742a12..3159d10 100644
+--- a/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
++++ b/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
+@@ -168,30 +168,30 @@ ldap.dn.user.useLookupName: LDAPAuthenticator.use_lookup_dn_username
+ {{- $c := dict }}
+ {{- $result := (dict "hub" (dict "config" $c)) }}
+ {{- /*
+- Flattens the config in .Values.apps.jupyterhub.auth to a format of
++ Flattens the config in .Values.apps.jupyterhub.auth to a format of
+ "keyX.keyY...": "value". Writes output to $c.
+ */}}
+- {{- include "jupyterhub.flattenDict" (list $c (omit .Values.apps.jupyterhub.auth "type" "custom")) }}
++ {{- include "jupyterhub.flattenDict" (list $c (omit .Values.apps.jupyterhub.apps.jupyterhub.auth "type" "custom")) }}
+
+ {{- /*
+ Transform the flattened config using a dictionary
+ representing the old z2jh config, output the result
+ in $c.
+ */}}
+- {{- include "jupyterhub.authDep.remapOldToNew.mappable" (list $c .Values.apps.jupyterhub.global.safeToShowValues) }}
++ {{- include "jupyterhub.authDep.remapOldToNew.mappable" (list $c .Values.apps.jupyterhub.apps.jupyterhub.global.safeToSho.Values.apps.jupyterhub. }}
+
+- {{- $class_old_config_key := .Values.apps.jupyterhub.auth.type | default "" }} {{- /* ldap - github */}}
++ {{- $class_old_config_key := .Values.apps.jupyterhub.auth.type | default "" }} {{- /* ldap - github */}}
+ {{- $class_new_entrypoint := "" }} {{- /* ldapauthenticator.LDAPAuthenticator - github */}}
+ {{- $class_new_config_key := "" }} {{- /* LDAPAuthenticator - GitHubOAuthenticator */}}
+
+ {{- /* SET $class_new_entrypoint, $class_new_config_key */}}
+ {{- if eq $class_old_config_key "custom" }}
+- {{- $class_new_entrypoint = .Values.apps.jupyterhub.auth.custom.className | default "custom.className wasn't configured!" }}
++ {{- $class_new_entrypoint = .Values.apps.jupyterhub.auth.custom.className | default "custom.className wasn't configured!" }}
+ {{- $class_new_config_key = $class_new_entrypoint | splitList "." | last }}
+ {{- /* UPDATE c dict explicitly with auth.custom.config */}}
+- {{- if .Values.apps.jupyterhub.auth.custom.config }}
+- {{- $custom_config := merge (dict) .Values.apps.jupyterhub.auth.custom.config }}
+- {{- if not .Values.apps.jupyterhub.global.safeToShowValues }}
++ {{- if .Values.apps.jupyterhub.auth.custom.config }}
++ {{- $custom_config := merge (dict) .Values.apps.jupyterhub.auth.custom.config }}
++ {{- if not .Values.apps.jupyterhub.global.safeToShowValues }}
+ {{- range $key, $val := $custom_config }}
+ {{- $_ := set $custom_config $key "***" }}
+ {{- end }}
+@@ -213,7 +213,7 @@ The JupyterHub Helm chart's auth config has been reworked and requires changes.
+
+ The new way to configure authentication in chart version 0.11.0+ is printed
+ below for your convenience. The values are not shown by default to ensure no
+-secrets are exposed, run helm upgrade with --set global.safeToShowValues=true
+-secrets are exposed, run helm upgrade with --set global.safeToShowValues=true
+ to show them.
+
+ {{ $result | toYaml }}
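For context, the `jupyterhub.flattenDict` helper referenced in the hunk above turns the nested auth config into dotted `keyX.keyY` keys before remapping them to the new config locations. A minimal Python equivalent of that flattening, assuming plain dicts (the template operates on Helm values):

    def flatten_dict(d, prefix=""):
        """Flatten {"a": {"b": 1}} into {"a.b": 1}."""
        flat = {}
        for key, value in d.items():
            dotted = f"{prefix}{key}"
            if isinstance(value, dict):
                flat.update(flatten_dict(value, prefix=f"{dotted}."))
            else:
                flat[dotted] = value
        return flat

    print(flatten_dict({"ldap": {"dn": {"search": {"user": "cn=admin"}}}}))
    # {'ldap.dn.search.user': 'cn=admin'}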
+diff --git a/applications/jupyterhub/deploy/templates/_helpers-names.tpl b/applications/jupyterhub/deploy/templates/_helpers-names.tpl
+index e9cf7bb..401d601 100644
+--- a/applications/jupyterhub/deploy/templates/_helpers-names.tpl
++++ b/applications/jupyterhub/deploy/templates/_helpers-names.tpl
+@@ -3,8 +3,8 @@
+ parent charts to reference these dynamic resource names.
+
+ To avoid duplicating documentation, for more information, please see the
+- fullnameOverride entry in schema.yaml or the configuration reference that
+- schema.yaml renders to.
++ fullnameOverride entry in values.schema.yaml or the configuration reference
++ that values.schema.yaml renders to.
+
+ https://z2jh.jupyter.org/en/latest/resources/reference.html#fullnameOverride
+ */}}
+@@ -38,8 +38,8 @@
+ {{- $name_override := .Values.apps.jupyterhub.nameOverride }}
+ {{- if ne .Chart.Name "jupyterhub" }}
+ {{- if .Values.apps.jupyterhub.jupyterhub }}
+- {{- $fullname_override = .Values.apps.jupyterhub.fullnameOverride }}
+- {{- $name_override = .Values.apps.jupyterhub.nameOverride }}
++ {{- $fullname_override = .Values.apps.jupyterhub.jupyterhub.fullnameOverride }}
++ {{- $name_override = .Values.apps.jupyterhub.jupyterhub.nameOverride }}
+ {{- end }}
+ {{- end }}
+
+@@ -76,12 +76,23 @@
+ {{- include "jupyterhub.fullname.dash" . }}hub
+ {{- end }}
+
++{{- /* hub-serviceaccount ServiceAccount */}}
++{{- define "jupyterhub.hub-serviceaccount.fullname" -}}
++ {{- if .Values.apps.jupyterhub.hub.serviceAccount.create }}
++ {{- .Values.apps.jupyterhub.hub.serviceAccount.name | default (include "jupyterhub.hub.fullname" .) }}
++ {{- else }}
++ {{- .Values.apps.jupyterhub.hub.serviceAccount.name | default "default" }}
++ {{- end }}
++{{- end }}
++
+ {{- /* hub-existing-secret Secret */}}
+ {{- define "jupyterhub.hub-existing-secret.fullname" -}}
+ {{- /* A hack to avoid issues from invoking this from a parent Helm chart. */}}
+ {{- $existing_secret := .Values.apps.jupyterhub.hub.existingSecret }}
+ {{- if ne .Chart.Name "jupyterhub" }}
+- {{- $existing_secret = .Values.apps.jupyterhub.hub.existingSecret }}
++ {{- if .Values.apps.jupyterhub.jupyterhub }}
++ {{- $existing_secret = .Values.apps.jupyterhub.jupyterhub.hub.existingSecret }}
++ {{- end }}
+ {{- end }}
+ {{- if $existing_secret }}
+ {{- $existing_secret }}
+@@ -133,11 +144,29 @@
+ {{- include "jupyterhub.fullname.dash" . }}autohttps
+ {{- end }}
+
++{{- /* autohttps-serviceaccount ServiceAccount */}}
++{{- define "jupyterhub.autohttps-serviceaccount.fullname" -}}
++ {{- if .Values.apps.jupyterhub.proxy.traefik.serviceAccount.create }}
++ {{- .Values.apps.jupyterhub.proxy.traefik.serviceAccount.name | default (include "jupyterhub.autohttps.fullname" .) }}
++ {{- else }}
++ {{- .Values.apps.jupyterhub.proxy.traefik.serviceAccount.name | default "default" }}
++ {{- end }}
++{{- end }}
++
+ {{- /* user-scheduler Deployment */}}
+ {{- define "jupyterhub.user-scheduler-deploy.fullname" -}}
+ {{- include "jupyterhub.fullname.dash" . }}user-scheduler
+ {{- end }}
+
++{{- /* user-scheduler-serviceaccount ServiceAccount */}}
++{{- define "jupyterhub.user-scheduler-serviceaccount.fullname" -}}
++ {{- if .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.create }}
++ {{- .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.name | default (include "jupyterhub.user-scheduler-deploy.fullname" .) }}
++ {{- else }}
++ {{- .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.name | default "default" }}
++ {{- end }}
++{{- end }}
++
+ {{- /* user-scheduler leader election lock resource */}}
+ {{- define "jupyterhub.user-scheduler-lock.fullname" -}}
+ {{- include "jupyterhub.user-scheduler-deploy.fullname" . }}-lock
+@@ -153,6 +182,15 @@
+ {{- include "jupyterhub.fullname.dash" . }}hook-image-awaiter
+ {{- end }}
+
++{{- /* image-awaiter-serviceaccount ServiceAccount */}}
++{{- define "jupyterhub.hook-image-awaiter-serviceaccount.fullname" -}}
++ {{- if .Values.apps.jupyterhub.prePuller.hook.serviceAccount.create }}
++ {{- .Values.apps.jupyterhub.prePuller.hook.serviceAccount.name | default (include "jupyterhub.hook-image-awaiter.fullname" .) }}
++ {{- else }}
++ {{- .Values.apps.jupyterhub.prePuller.hook.serviceAccount.name | default "default" }}
++ {{- end }}
++{{- end }}
++
+ {{- /* hook-image-puller DaemonSet */}}
+ {{- define "jupyterhub.hook-image-puller.fullname" -}}
+ {{- include "jupyterhub.fullname.dash" . }}hook-image-puller
+@@ -210,6 +248,15 @@
+ {{- end }}
+ {{- end }}
+
++{{- /* image-puller Priority */}}
++{{- define "jupyterhub.image-puller-priority.fullname" -}}
++ {{- if (include "jupyterhub.fullname" .) }}
++ {{- include "jupyterhub.fullname.dash" . }}image-puller
++ {{- else }}
++ {{- .Release.Name }}-image-puller-priority
++ {{- end }}
++{{- end }}
++
+ {{- /* user-scheduler's registered name */}}
+ {{- define "jupyterhub.user-scheduler.fullname" -}}
+ {{- if (include "jupyterhub.fullname" .) }}
+@@ -231,6 +278,7 @@
+ fullname: {{ include "jupyterhub.fullname" . | quote }}
+ fullname-dash: {{ include "jupyterhub.fullname.dash" . | quote }}
+ hub: {{ include "jupyterhub.hub.fullname" . | quote }}
++hub-serviceaccount: {{ include "jupyterhub.hub-serviceaccount.fullname" . | quote }}
+ hub-existing-secret: {{ include "jupyterhub.hub-existing-secret.fullname" . | quote }}
+ hub-existing-secret-or-default: {{ include "jupyterhub.hub-existing-secret-or-default.fullname" . | quote }}
+ hub-pvc: {{ include "jupyterhub.hub-pvc.fullname" . | quote }}
+@@ -241,10 +289,14 @@ proxy-public: {{ include "jupyterhub.proxy-public.fullname" . | quote }}
+ proxy-public-tls: {{ include "jupyterhub.proxy-public-tls.fullname" . | quote }}
+ proxy-public-manual-tls: {{ include "jupyterhub.proxy-public-manual-tls.fullname" . | quote }}
+ autohttps: {{ include "jupyterhub.autohttps.fullname" . | quote }}
++autohttps-serviceaccount: {{ include "jupyterhub.autohttps-serviceaccount.fullname" . | quote }}
+ user-scheduler-deploy: {{ include "jupyterhub.user-scheduler-deploy.fullname" . | quote }}
++user-scheduler-serviceaccount: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . | quote }}
+ user-scheduler-lock: {{ include "jupyterhub.user-scheduler-lock.fullname" . | quote }}
+ user-placeholder: {{ include "jupyterhub.user-placeholder.fullname" . | quote }}
++image-puller-priority: {{ include "jupyterhub.image-puller-priority.fullname" . | quote }}
+ hook-image-awaiter: {{ include "jupyterhub.hook-image-awaiter.fullname" . | quote }}
++hook-image-awaiter-serviceaccount: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . | quote }}
+ hook-image-puller: {{ include "jupyterhub.hook-image-puller.fullname" . | quote }}
+ continuous-image-puller: {{ include "jupyterhub.continuous-image-puller.fullname" . | quote }}
+ singleuser: {{ include "jupyterhub.singleuser.fullname" . | quote }}
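All the new `*-serviceaccount.fullname` helpers added above share one resolution rule: an explicitly configured name wins; otherwise the component's fullname is used when the account is created, and the namespace's `default` account when it is not. Sketched in Python (function and argument names are illustrative):

    def service_account_name(create, explicit_name, component_fullname):
        """Mirror the chart's serviceAccount name resolution."""
        if create:
            # A dedicated account is created: explicit name or the component name.
            return explicit_name or component_fullname
        # No account is created: explicit name or the namespace default account.
        return explicit_name or "default"

    assert service_account_name(True, None, "release-hub") == "release-hub"
    assert service_account_name(False, None, "release-hub") == "default"
    assert service_account_name(False, "my-sa", "release-hub") == "my-sa"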
+diff --git a/applications/jupyterhub/deploy/templates/_helpers-netpol.tpl b/applications/jupyterhub/deploy/templates/_helpers-netpol.tpl
+new file mode 100644
+index 0000000..4075569
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/_helpers-netpol.tpl
+@@ -0,0 +1,101 @@
++{{- /*
++ This named template renders egress rules for NetworkPolicy resources based on
++ common configuration.
++
++ It renders based on the `egressAllowRules` and `egress` keys of the
++ passed networkPolicy config object. Each flag set to true under
++ `egressAllowRules` is rendered into an egress rule that sits next to any
++ custom user-defined rules from the `egress` config.
++
++ This named template needs to render based on a specific networkPolicy
++ resource, but also needs access to the root context. Due to that, it
++ accepts a list as its scope, where the first element is supposed to be the
++ root context and the second element is supposed to be the networkPolicy
++ configuration object.
++
++ As an example, this is how you would render this named template from a
++ NetworkPolicy resource under its egress:
++
++ egress:
++ # other rules here...
++
++ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.hub.networkPolicy)) }}
++ {{- . | nindent 4 }}
++ {{- end }}
++
++ Note that the references to privateIPs and nonPrivateIPs relate to
++ https://en.wikipedia.org/wiki/Private_network#Private_IPv4_addresses.
++*/}}
++
++{{- define "jupyterhub.networkPolicy.renderEgressRules" -}}
++{{- $root := index . 0 }}
++{{- $netpol := index . 1 }}
++{{- if or (or $netpol.egressAllowRules.dnsPortsCloudMetadataServer $netpol.egressAllowRules.dnsPortsKubeSystemNamespace) $netpol.egressAllowRules.dnsPortsPrivateIPs }}
++- ports:
++ - port: 53
++ protocol: UDP
++ - port: 53
++ protocol: TCP
++ to:
++ {{- if $netpol.egressAllowRules.dnsPortsCloudMetadataServer }}
++ # Allow outbound connections to DNS ports on the cloud metadata server
++ - ipBlock:
++ cidr: {{ $root.Values.apps.jupyterhub.singleuser.cloudMetadata.ip }}/32
++ {{- end }}
++ {{- if $netpol.egressAllowRules.dnsPortsKubeSystemNamespace }}
++ # Allow outbound connections to DNS ports on pods in the kube-system
++ # namespace
++ - namespaceSelector:
++ matchLabels:
++ kubernetes.io/metadata.name: kube-system
++ {{- end }}
++ {{- if $netpol.egressAllowRules.dnsPortsPrivateIPs }}
++ # Allow outbound connections to DNS ports on destinations in the private IP
++ # ranges
++ - ipBlock:
++ cidr: 10.0.0.0/8
++ - ipBlock:
++ cidr: 172.16.0.0/12
++ - ipBlock:
++ cidr: 192.168.0.0/16
++ {{- end }}
++{{- end }}
++
++{{- if $netpol.egressAllowRules.nonPrivateIPs }}
++# Allow outbound connections to non-private IP ranges
++- to:
++ - ipBlock:
++ cidr: 0.0.0.0/0
++ except:
++ # As part of this rule:
++ # - don't allow outbound connections to private IPs
++ - 10.0.0.0/8
++ - 172.16.0.0/12
++ - 192.168.0.0/16
++ # - don't allow outbound connections to the cloud metadata server
++ - {{ $root.Values.apps.jupyterhub.singleuser.cloudMetadata.ip }}/32
++{{- end }}
++
++{{- if $netpol.egressAllowRules.privateIPs }}
++# Allow outbound connections to private IP ranges
++- to:
++ - ipBlock:
++ cidr: 10.0.0.0/8
++ - ipBlock:
++ cidr: 172.16.0.0/12
++ - ipBlock:
++ cidr: 192.168.0.0/16
++{{- end }}
++
++{{- if $netpol.egressAllowRules.cloudMetadataServer }}
++# Allow outbound connections to the cloud metadata server
++- to:
++ - ipBlock:
++ cidr: {{ $root.Values.apps.jupyterhub.singleuser.cloudMetadata.ip }}/32
++{{- end }}
++
++{{- with $netpol.egress }}
++# Allow outbound connections based on user specified rules
++{{ . | toYaml }}
++{{- end }}
++{{- end }}
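A condensed Python model of the decision logic the helper above implements, for a subset of the flags (CIDRs as in the template; the dict structure and the example metadata IP 169.254.169.254 are illustrative):

    PRIVATE_CIDRS = ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]

    def render_egress_rules(allow_rules, cloud_metadata_ip, custom_egress=()):
        """Build NetworkPolicy egress entries from boolean allow flags."""
        rules = []
        if allow_rules.get("nonPrivateIPs"):
            # Everything except private ranges and the cloud metadata server.
            rules.append({"to": [{"ipBlock": {
                "cidr": "0.0.0.0/0",
                "except": PRIVATE_CIDRS + [cloud_metadata_ip + "/32"],
            }}]})
        if allow_rules.get("privateIPs"):
            rules.append({"to": [{"ipBlock": {"cidr": c}} for c in PRIVATE_CIDRS]})
        if allow_rules.get("cloudMetadataServer"):
            rules.append({"to": [{"ipBlock": {"cidr": cloud_metadata_ip + "/32"}}]})
        rules.extend(custom_egress)  # user-specified rules always come last
        return rules

    print(render_egress_rules({"nonPrivateIPs": True}, "169.254.169.254"))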
+diff --git a/applications/jupyterhub/deploy/templates/_helpers.tpl b/applications/jupyterhub/deploy/templates/_helpers.tpl
+index efea86d..a202363 100755
+--- a/applications/jupyterhub/deploy/templates/_helpers.tpl
++++ b/applications/jupyterhub/deploy/templates/_helpers.tpl
+@@ -12,7 +12,7 @@
+
+ When you ask a helper to render its content, one often forwards the current
+ scope to the helper in order to allow it to access .Release.Name,
+- .Values.apps.jupyterhub.rbac.enabled and similar values.
++ .Values.apps.jupyterhub.rbac.create and similar values.
+
+ #### Example - Passing the current scope
+ {{ include "jupyterhub.commonLabels" . }}
+@@ -180,8 +180,51 @@ component: {{ include "jupyterhub.componentLabel" . }}
+ Augments passed .pullSecrets with $.Values.apps.jupyterhub.imagePullSecrets
+ */}}
+ {{- define "jupyterhub.imagePullSecrets" -}}
++ {{- /*
++ We have implemented a trick to allow a parent chart depending on this
++ chart to call these named templates.
++
++ Caveats and notes:
++
++ 1. While parent charts can reference these, grandparent charts can't.
++ 2. Parent charts must not use an alias for this chart.
++ 3. There is no failsafe workaround to above due to
++ https://github.com/helm/helm/issues/9214.
++ 4. .Chart is of its own type (*chart.Metadata) and needs to be cast
++ using "toYaml | fromYaml" in order to be able to use normal helm
++ template functions on it.
++ */}}
++ {{- $jupyterhub_values := .root.Values.apps.jupyterhub }}
++ {{- if ne .root.Chart.Name "jupyterhub" }}
++ {{- if .root.Values.apps.jupyterhub.jupyterhub }}
++ {{- $jupyterhub_values = .root.Values.apps.jupyterhub.jupyterhub }}
++ {{- end }}
++ {{- end }}
+
++ {{- /* Populate $_.list with all relevant entries */}}
++ {{- $_ := dict "list" (concat .image.pullSecrets $jupyterhub_values.imagePullSecrets | uniq) }}
++ {{- if and $jupyterhub_values.imagePullSecret.create $jupyterhub_values.imagePullSecret.automaticReferenceInjection }}
++ {{- $__ := set $_ "list" (append $_.list (include "jupyterhub.image-pull-secret.fullname" .root) | uniq) }}
++ {{- end }}
+
++ {{- /* Decide if something should be written */}}
++ {{- if not (eq ($_.list | toJson) "[]") }}
++
++ {{- /* Process $_.list into $_.res, turning plain strings into dicts
++ with the string as the "name" key's value */}}
++ {{- $_ := set $_ "res" list }}
++ {{- range $_.list }}
++ {{- if eq (typeOf .) "string" }}
++ {{- $__ := set $_ "res" (append $_.res (dict "name" .)) }}
++ {{- else }}
++ {{- $__ := set $_ "res" (append $_.res .) }}
++ {{- end }}
++ {{- end }}
++
++ {{- /* Write the results */}}
++ {{- $_.res | toJson }}
++
++ {{- end }}
+ {{- end }}
+
+ {{- /*
+@@ -339,3 +382,21 @@ limits:
+ {{- print "\n\nextraFiles entries (" $file_key ") must only contain one of the fields: 'data', 'stringData', and 'binaryData'." | fail }}
+ {{- end }}
+ {{- end }}
++
++{{- /*
++ jupyterhub.chart-version-to-git-ref:
++ Renders a valid git reference from a chartpress generated version string.
++ In practice, either a git tag or a git commit hash will be returned.
++
++ - The version string will follow a chartpress pattern, see
++ https://github.com/jupyterhub/chartpress#examples-chart-versions-and-image-tags.
++
++ - The regexReplaceAll function is a sprig library function, see
++ https://masterminds.github.io/sprig/strings.html.
++
++ - The regular expression is in golang syntax, but \d had to become \\d for
++ example.
++*/}}
++{{- define "jupyterhub.chart-version-to-git-ref" -}}
++{{- regexReplaceAll ".*[.-]n\\d+[.]h(.*)" . "${1}" }}
++{{- end }}
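The regular expression in `jupyterhub.chart-version-to-git-ref` above reduces a chartpress version to something `git checkout` accepts: the trailing commit hash for a dev version, or the string unchanged (which then works as a tag). The same expression, exercised in Python with example version strings following the chartpress pattern linked above:

    import re

    def chart_version_to_git_ref(version):
        """Return the commit hash from a chartpress dev version, or the
        version itself unchanged, which then works as a git tag."""
        return re.sub(r".*[.-]n\d+[.]h(.*)", r"\1", version)

    print(chart_version_to_git_ref("1.2.0"))                # 1.2.0 (a git tag)
    print(chart_version_to_git_ref("1.2.0-n217.h53c4936"))  # 53c4936 (a commit)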
+diff --git a/applications/jupyterhub/deploy/templates/hub/configmap.yaml b/applications/jupyterhub/deploy/templates/hub/configmap.yaml
+index c913f67..f52feb6 100755
+--- a/applications/jupyterhub/deploy/templates/hub/configmap.yaml
++++ b/applications/jupyterhub/deploy/templates/hub/configmap.yaml
+@@ -29,5 +29,6 @@ data:
+ */}}
+ checksum_hook-image-puller: {{ include "jupyterhub.imagePuller.daemonset.hook.checksum" . | quote }}
+
++ # EDIT: CLOUDHARNESS
+ allvalues.yaml: |
+ {{- .Values | toYaml | nindent 4 }}
+\ No newline at end of file
+diff --git a/applications/jupyterhub/deploy/templates/hub/deployment.yaml b/applications/jupyterhub/deploy/templates/hub/deployment.yaml
+index 82132c6..d105ecc 100755
+--- a/applications/jupyterhub/deploy/templates/hub/deployment.yaml
++++ b/applications/jupyterhub/deploy/templates/hub/deployment.yaml
+@@ -5,6 +5,9 @@ metadata:
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ spec:
++ {{- if typeIs "int" .Values.apps.jupyterhub.hub.revisionHistoryLimit }}
++ revisionHistoryLimit: {{ .Values.apps.jupyterhub.hub.revisionHistoryLimit }}
++ {{- end }}
+ replicas: 1
+ selector:
+ matchLabels:
+@@ -30,11 +33,14 @@ spec:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ spec:
+-{{ include "deploy_utils.etcHosts" . | indent 6 }}
++{{ include "deploy_utils.etcHosts" . | indent 6 }} # EDIT: CLOUDHARNESS
+ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
+ {{- end }}
+- nodeSelector: {{ toJson .Values.apps.jupyterhub.hub.nodeSelector }}
++ {{- with .Values.apps.jupyterhub.hub.nodeSelector }}
++ nodeSelector:
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.hub.tolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+@@ -44,7 +50,7 @@ spec:
+ - name: config
+ configMap:
+ name: {{ include "jupyterhub.hub.fullname" . }}
+- {{- /* This is needed by cloudharness libraries */}}
++ {{- /* EDIT: CLOUDHARNESS This is needed by cloudharness libraries */}}
+ - name: cloudharness-allvalues
+ configMap:
+ name: cloudharness-allvalues
+@@ -82,11 +88,13 @@ spec:
+ persistentVolumeClaim:
+ claimName: {{ include "jupyterhub.hub-pvc.fullname" . }}
+ {{- end }}
+- {{- if .Values.apps.jupyterhub.rbac.enabled }}
+- serviceAccountName: {{ include "jupyterhub.hub.fullname" . }}
++ {{- with include "jupyterhub.hub-serviceaccount.fullname" . }}
++ serviceAccountName: {{ . }}
+ {{- end }}
++ {{- with .Values.apps.jupyterhub.hub.podSecurityContext }}
+ securityContext:
+- fsGroup: {{ .Values.apps.jupyterhub.hub.fsGid }}
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.apps.jupyterhub.hub.image) }}
+ imagePullSecrets: {{ . }}
+ {{- end }}
+@@ -153,14 +161,14 @@ spec:
+ name: config
+ - mountPath: /usr/local/etc/jupyterhub/secret/
+ name: secret
+- - name: cloudharness-allvalues
++ - name: cloudharness-allvalues # EDIT: CLOUDHARNESS START
+ mountPath: /opt/cloudharness/resources/allvalues.yaml
+ subPath: allvalues.yaml
+ {{- if .Values.apps.accounts }}
+ - name: cloudharness-kc-accounts
+ mountPath: /opt/cloudharness/resources/auth
+ readOnly: true
+- {{- end }}
++ {{- end }} # EDIT: CLOUDHARNESS END
+ {{- if (include "jupyterhub.hub-existing-secret.fullname" .) }}
+ - mountPath: /usr/local/etc/jupyterhub/existing-secret/
+ name: existing-secret
+diff --git a/applications/jupyterhub/deploy/templates/hub/netpol.yaml b/applications/jupyterhub/deploy/templates/hub/netpol.yaml
+index 9a7a6bc..d9508e2 100755
+--- a/applications/jupyterhub/deploy/templates/hub/netpol.yaml
++++ b/applications/jupyterhub/deploy/templates/hub/netpol.yaml
+@@ -61,31 +61,24 @@ spec:
+
+ egress:
+ # hub --> proxy
+- - ports:
+- - port: 8001
+- to:
++ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "proxy") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++ ports:
++ - port: 8001
++
+ # hub --> singleuser-server
+- - ports:
+- - port: 8888
+- to:
++ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "singleuser-server") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++ ports:
++ - port: 8888
+
+- # hub --> Kubernetes internal DNS
+- - ports:
+- - protocol: UDP
+- port: 53
+- - protocol: TCP
+- port: 53
+-
+- {{- with .Values.apps.jupyterhub.hub.networkPolicy.egress }}
+- # hub --> depends, but the default is everything
+- {{- . | toYaml | nindent 4 }}
++ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.hub.networkPolicy)) }}
++ {{- . | nindent 4 }}
+ {{- end }}
+ {{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/hub/pdb.yaml b/applications/jupyterhub/deploy/templates/hub/pdb.yaml
+index 855609d..bb6c7b1 100755
+--- a/applications/jupyterhub/deploy/templates/hub/pdb.yaml
++++ b/applications/jupyterhub/deploy/templates/hub/pdb.yaml
+@@ -1,9 +1,5 @@
+ {{- if .Values.apps.jupyterhub.hub.pdb.enabled -}}
+-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
+ apiVersion: policy/v1
+-{{- else }}
+-apiVersion: policy/v1beta1
+-{{- end }}
+ kind: PodDisruptionBudget
+ metadata:
+ name: {{ include "jupyterhub.hub.fullname" . }}
+diff --git a/applications/jupyterhub/deploy/templates/hub/rbac.yaml b/applications/jupyterhub/deploy/templates/hub/rbac.yaml
+index 738daab..1b689af 100755
+--- a/applications/jupyterhub/deploy/templates/hub/rbac.yaml
++++ b/applications/jupyterhub/deploy/templates/hub/rbac.yaml
+@@ -1,15 +1,4 @@
+-{{- if .Values.apps.jupyterhub.rbac.enabled -}}
+-apiVersion: v1
+-kind: ServiceAccount
+-metadata:
+- name: {{ include "jupyterhub.hub.fullname" . }}
+- {{- with .Values.apps.jupyterhub.hub.serviceAccount.annotations }}
+- annotations:
+- {{- . | toYaml | nindent 4 }}
+- {{- end }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+----
++{{- if .Values.apps.jupyterhub.rbac.create -}}
+ kind: Role
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+@@ -32,7 +21,7 @@ metadata:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ subjects:
+ - kind: ServiceAccount
+- name: {{ include "jupyterhub.hub.fullname" . }}
++ name: {{ include "jupyterhub.hub-serviceaccount.fullname" . }}
+ namespace: "{{ .Release.Namespace }}"
+ roleRef:
+ kind: Role
+diff --git a/applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml b/applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml
+new file mode 100644
+index 0000000..817ed66
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml
+@@ -0,0 +1,12 @@
++{{- if .Values.apps.jupyterhub.hub.serviceAccount.create -}}
++apiVersion: v1
++kind: ServiceAccount
++metadata:
++ name: {{ include "jupyterhub.hub-serviceaccount.fullname" . }}
++ {{- with .Values.apps.jupyterhub.hub.serviceAccount.annotations }}
++ annotations:
++ {{- . | toYaml | nindent 4 }}
++ {{- end }}
++ labels:
++ {{- include "jupyterhub.labels" . | nindent 4 }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/image-pull-secret.yaml b/applications/jupyterhub/deploy/templates/image-pull-secret.yaml
+new file mode 100644
+index 0000000..b7544db
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/image-pull-secret.yaml
+@@ -0,0 +1,15 @@
++{{- if .Values.apps.jupyterhub.imagePullSecret.create }}
++kind: Secret
++apiVersion: v1
++metadata:
++ name: {{ include "jupyterhub.image-pull-secret.fullname" . }}
++ labels:
++ {{- include "jupyterhub.labels" . | nindent 4 }}
++ annotations:
++ "helm.sh/hook": pre-install,pre-upgrade
++ "helm.sh/hook-delete-policy": before-hook-creation
++ "helm.sh/hook-weight": "-20"
++type: kubernetes.io/dockerconfigjson
++data:
++ .dockerconfigjson: {{ include "jupyterhub.dockerconfigjson" . }}
++{{- end }}
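The `jupyterhub.dockerconfigjson` helper referenced above is expected to render base64-encoded registry credentials in the standard `kubernetes.io/dockerconfigjson` layout. A sketch of that payload built in Python (the layout is standard Kubernetes; registry and credential values are illustrative):

    import base64
    import json

    def dockerconfigjson(registry, username, password, email=""):
        """Build the .dockerconfigjson value of an image pull Secret."""
        auth = base64.b64encode(f"{username}:{password}".encode()).decode()
        payload = {"auths": {registry: {
            "username": username,
            "password": password,
            "email": email,
            "auth": auth,
        }}}
        # The Secret's .dockerconfigjson field holds this JSON, base64-encoded.
        return base64.b64encode(json.dumps(payload).encode()).decode()

    print(dockerconfigjson("registry.example.com", "bot", "s3cret"))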
+diff --git a/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl b/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
+index e16fd1a..528345c 100644
+--- a/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
++++ b/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
+@@ -34,6 +34,9 @@ spec:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 100%
++ {{- if typeIs "int" .Values.apps.jupyterhub.prePuller.revisionHistoryLimit }}
++ revisionHistoryLimit: {{ .Values.apps.jupyterhub.prePuller.revisionHistoryLimit }}
++ {{- end }}
+ template:
+ metadata:
+ labels:
+@@ -44,13 +47,17 @@ spec:
+ {{- end }}
+ spec:
+ {{- /*
+- continuous-image-puller pods are made evictable to save on the k8s pods
+- per node limit all k8s clusters have.
++ image-puller pods are made evictable to save on the k8s pods
++ per node limit all k8s clusters have, and have a higher priority
++ than user-placeholder pods that could block an entire node.
+ */}}
+- {{- if and (not .hook) .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+- priorityClassName: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
++ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
++ priorityClassName: {{ include "jupyterhub.image-puller-priority.fullname" . }}
++ {{- end }}
++ {{- with .Values.apps.jupyterhub.singleuser.nodeSelector }}
++ nodeSelector:
++ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+- nodeSelector: {{ toJson .Values.apps.jupyterhub.singleuser.nodeSelector }}
+ {{- with concat .Values.apps.jupyterhub.scheduling.userPods.tolerations .Values.apps.jupyterhub.singleuser.extraTolerations .Values.apps.jupyterhub.prePuller.extraTolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+@@ -127,6 +134,7 @@ spec:
+ {{- /* --- Conditionally pull profileList images --- */}}
+ {{- if .Values.apps.jupyterhub.prePuller.pullProfileListImages }}
+ {{- range $k, $container := .Values.apps.jupyterhub.singleuser.profileList }}
++ {{- /* profile's kubespawner_override */}}
+ {{- if $container.kubespawner_override }}
+ {{- if $container.kubespawner_override.image }}
+ - name: image-pull-singleuser-profilelist-{{ $k }}
+@@ -145,13 +153,15 @@ spec:
+ {{- end }}
+ {{- end }}
+ {{- end }}
+- {{- end }}
+- {{- end }}
+-
+- {{- /* --- Pull extra images --- */}}
+- {{- range $k, $v := .Values.apps.jupyterhub.prePuller.extraImages }}
+- - name: image-pull-{{ $k }}
+- image: {{ $v.name }}:{{ $v.tag }}
++ {{- /* kubespawner_override in profile's profile_options */}}
++ {{- if $container.profile_options }}
++ {{- range $option, $option_spec := $container.profile_options }}
++ {{- if $option_spec.choices }}
++ {{- range $choice, $choice_spec := $option_spec.choices }}
++ {{- if $choice_spec.kubespawner_override }}
++ {{- if $choice_spec.kubespawner_override.image }}
++ - name: image-pull-profile-{{ $k }}-option-{{ $option }}-{{ $choice }}
++ image: {{ $choice_spec.kubespawner_override.image }}
+ command:
+ - /bin/sh
+ - -c
+@@ -163,13 +173,20 @@ spec:
+ {{- with $.Values.apps.jupyterhub.prePuller.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+- {{- end }}
++ {{- end }}
++ {{- end }}
++ {{- end }}
++ {{- end }}
++ {{- end }}
++ {{- end }}
++ {{- end }}
++ {{- end }}
+ {{- end }}
+
+- {{- /* --- Pull CloudHarness tasks images --- */}}
+- {{- range $k, $v := ( index .Values "task-images" ) }}
+- - name: image-pull-{{ $k | replace "-" "" }}
+- image: {{ $v }}
++ {{- /* --- Pull extra images --- */}}
++ {{- range $k, $v := .Values.apps.jupyterhub.prePuller.extraImages }}
++ - name: image-pull-{{ $k }}
++ image: {{ $v.name }}:{{ $v.tag }}
+ command:
+ - /bin/sh
+ - -c
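The restructured loop above now also walks each profile's `profile_options` choices for a `kubespawner_override.image` to pre-pull. The traversal order, restated as a Python generator over a values-style dict (the structure mirrors KubeSpawner's profileList; the helper itself is illustrative):

    def iter_prepull_images(profile_list):
        """Yield every image referenced by a profile or one of its option choices."""
        for profile in profile_list:
            override = profile.get("kubespawner_override", {})
            if override.get("image"):
                yield override["image"]
            for option in profile.get("profile_options", {}).values():
                for choice in option.get("choices", {}).values():
                    image = choice.get("kubespawner_override", {}).get("image")
                    if image:
                        yield image

    profiles = [{"profile_options": {"env": {"choices": {
        "gpu": {"kubespawner_override": {"image": "example/gpu-notebook:1.0"}}}}}}]
    print(list(iter_prepull_images(profiles)))  # ['example/gpu-notebook:1.0']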
+diff --git a/applications/jupyterhub/deploy/templates/image-puller/job.yaml b/applications/jupyterhub/deploy/templates/image-puller/job.yaml
+index bdd9f63..cc6db3e 100755
+--- a/applications/jupyterhub/deploy/templates/image-puller/job.yaml
++++ b/applications/jupyterhub/deploy/templates/image-puller/job.yaml
+@@ -28,16 +28,22 @@ spec:
+ labels:
+ {{- /* Changes here will cause the Job to restart the pods. */}}
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
++ {{- with .Values.apps.jupyterhub.prePuller.labels }}
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ {{- with .Values.apps.jupyterhub.prePuller.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ spec:
+ restartPolicy: Never
+- {{- if .Values.apps.jupyterhub.rbac.enabled }}
+- serviceAccountName: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
++ {{- with include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
++ serviceAccountName: {{ . }}
++ {{- end }}
++ {{- with .Values.apps.jupyterhub.prePuller.hook.nodeSelector }}
++ nodeSelector:
++ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+- nodeSelector: {{ toJson .Values.apps.jupyterhub.prePuller.hook.nodeSelector }}
+ {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.prePuller.hook.tolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+@@ -58,6 +64,7 @@ spec:
+ - -api-server-address=https://kubernetes.default.svc:$(KUBERNETES_SERVICE_PORT)
+ - -namespace={{ .Release.Namespace }}
+ - -daemonset={{ include "jupyterhub.hook-image-puller.fullname" . }}
++ - -pod-scheduling-wait-duration={{ .Values.apps.jupyterhub.prePuller.hook.podSchedulingWaitDuration }}
+ {{- with .Values.apps.jupyterhub.prePuller.hook.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+diff --git a/applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml b/applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml
+new file mode 100644
+index 0000000..1a3fca3
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml
+@@ -0,0 +1,18 @@
++{{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
++{{- if or .Values.apps.jupyterhub.prePuller.hook.enabled .Values.apps.jupyterhub.prePuller.continuous.enabled -}}
++apiVersion: scheduling.k8s.io/v1
++kind: PriorityClass
++metadata:
++ name: {{ include "jupyterhub.image-puller-priority.fullname" . }}
++ annotations:
++ meta.helm.sh/release-name: "{{ .Release.Name }}"
++ meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
++ labels:
++ {{- include "jupyterhub.labels" . | nindent 4 }}
++value: {{ .Values.apps.jupyterhub.scheduling.podPriority.imagePullerPriority }}
++globalDefault: false
++description: >-
++ Enables [hook|continuous]-image-puller pods to fit on nodes even though they
++ are clogged by user-placeholder pods, while not evicting normal user pods.
++{{- end }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml b/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
+index 95c86dd..5946896 100755
+--- a/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
++++ b/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
+@@ -1,29 +1,8 @@
+ {{- /*
+ Permissions to be used by the hook-image-awaiter job
+ */}}
+-{{- if .Values.apps.jupyterhub.rbac.enabled }}
+-{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) }}
+-{{- /*
+-This service account...
+-*/ -}}
+-apiVersion: v1
+-kind: ServiceAccount
+-metadata:
+- name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+- hub.jupyter.org/deletable: "true"
+- annotations:
+- "helm.sh/hook": pre-install,pre-upgrade
+- "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+- "helm.sh/hook-weight": "0"
+- {{- with .Values.apps.jupyterhub.prePuller.hook.serviceAccount.annotations }}
+- {{- . | toYaml | nindent 4 }}
+- {{- end }}
+----
+-{{- /*
+-... will be used by this role...
+-*/}}
++{{- if .Values.apps.jupyterhub.rbac.create -}}
++{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
+ kind: Role
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+@@ -56,7 +35,7 @@ metadata:
+ "helm.sh/hook-weight": "0"
+ subjects:
+ - kind: ServiceAccount
+- name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
++ name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
+ namespace: "{{ .Release.Namespace }}"
+ roleRef:
+ kind: Role
+diff --git a/applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml b/applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml
+new file mode 100644
+index 0000000..2e5fa72
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml
+@@ -0,0 +1,21 @@
++{{- /*
++ServiceAccount for the pre-puller hook's image-awaiter-job
++*/}}
++{{- if .Values.apps.jupyterhub.prePuller.hook.serviceAccount.create -}}
++{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
++apiVersion: v1
++kind: ServiceAccount
++metadata:
++ name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
++ labels:
++ {{- include "jupyterhub.labels" . | nindent 4 }}
++ hub.jupyter.org/deletable: "true"
++ annotations:
++ "helm.sh/hook": pre-install,pre-upgrade
++ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
++ "helm.sh/hook-weight": "0"
++ {{- with .Values.apps.jupyterhub.prePuller.hook.serviceAccount.annotations }}
++ {{- . | toYaml | nindent 4 }}
++ {{- end }}
++{{- end }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt b/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt
+deleted file mode 100755
+index 08bd7bb..0000000
+--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt
++++ /dev/null
+@@ -1,9 +0,0 @@
+-# Automatic HTTPS Terminator
+-
+-This directory has Kubernetes objects for automatic Let's Encrypt Support.
+-When enabled, we create a new deployment object that has an nginx-ingress
+-and kube-lego container in it. This is responsible for requesting,
+-storing and renewing certificates as needed from Let's Encrypt.
+-
+-The only change required outside of this directory is in the `proxy-public`
+-service, which targets different hubs based on automatic HTTPS status.
+\ No newline at end of file
+diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml
+deleted file mode 100755
+index 8d71a97..0000000
+--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml
++++ /dev/null
+@@ -1,28 +0,0 @@
+-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
+-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
+-{{- if $autoHTTPS -}}
+-{{- $_ := .Values.apps.jupyterhub.proxy.https.letsencrypt.contactEmail | required "proxy.https.letsencrypt.contactEmail is a required field" -}}
+-
+-# This configmap contains Traefik configuration files to be mounted.
+-# - traefik.yaml will only be read during startup (static configuration)
+-# - dynamic.yaml will be read on change (dynamic configuration)
+-#
+-# ref: https://docs.traefik.io/getting-started/configuration-overview/
+-#
+-# The configuration files are first rendered with Helm templating to large YAML
+-# strings. Then we use the fromYAML function on these strings to get an object,
+-# that we in turn merge with user provided extra configuration.
+-#
+-kind: ConfigMap
+-apiVersion: v1
+-metadata:
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+-data:
+- traefik.yaml: |
+- {{- include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.apps.jupyterhub.proxy.traefik.extraStaticConfig | toYaml | nindent 4 }}
+- dynamic.yaml: |
+- {{- include "jupyterhub.dynamic.yaml" . | fromYaml | merge .Values.apps.jupyterhub.proxy.traefik.extraDynamicConfig | toYaml | nindent 4 }}
+-
+-{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml
+deleted file mode 100755
+index fcb062f..0000000
+--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml
++++ /dev/null
+@@ -1,141 +0,0 @@
+-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
+-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
+-{{- if $autoHTTPS -}}
+-apiVersion: apps/v1
+-kind: Deployment
+-metadata:
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+-spec:
+- replicas: 1
+- selector:
+- matchLabels:
+- {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+- template:
+- metadata:
+- labels:
+- {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+- hub.jupyter.org/network-access-proxy-http: "true"
+- {{- with .Values.apps.jupyterhub.proxy.traefik.labels }}
+- {{- . | toYaml | nindent 8 }}
+- {{- end }}
+- annotations:
+- # Only force a restart through a change to this checksum when the static
+- # configuration is changed, as the dynamic can be updated after start.
+- # Any disruptions to this deployment impacts everything, it is the
+- # entrypoint of all network traffic.
+- checksum/static-config: {{ include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.apps.jupyterhub.proxy.traefik.extraStaticConfig | toYaml | sha256sum }}
+- spec:
+- {{- if .Values.apps.jupyterhub.rbac.enabled }}
+- serviceAccountName: {{ include "jupyterhub.autohttps.fullname" . }}
+- {{- end }}
+- {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+- priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
+- {{- end }}
+- nodeSelector: {{ toJson .Values.apps.jupyterhub.proxy.traefik.nodeSelector }}
+- {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.proxy.traefik.tolerations }}
+- tolerations:
+- {{- . | toYaml | nindent 8 }}
+- {{- end }}
+- {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
+- volumes:
+- - name: certificates
+- emptyDir: {}
+- - name: traefik-config
+- configMap:
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- {{- with .Values.apps.jupyterhub.proxy.traefik.extraVolumes }}
+- {{- . | toYaml | nindent 8 }}
+- {{- end }}
+- {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.apps.jupyterhub.proxy.traefik.image) }}
+- imagePullSecrets: {{ . }}
+- {{- end }}
+- initContainers:
+- - name: load-acme
+- image: "{{ .Values.apps.jupyterhub.proxy.secretSync.image.name }}:{{ .Values.apps.jupyterhub.proxy.secretSync.image.tag }}"
+- {{- with .Values.apps.jupyterhub.proxy.secretSync.image.pullPolicy }}
+- imagePullPolicy: {{ . }}
+- {{- end }}
+- args:
+- - load
+- - {{ include "jupyterhub.proxy-public-tls.fullname" . }}
+- - acme.json
+- - /etc/acme/acme.json
+- env:
+- # We need this to get logs immediately
+- - name: PYTHONUNBUFFERED
+- value: "True"
+- {{- with .Values.apps.jupyterhub.proxy.traefik.extraEnv }}
+- {{- include "jupyterhub.extraEnv" . | nindent 12 }}
+- {{- end }}
+- volumeMounts:
+- - name: certificates
+- mountPath: /etc/acme
+- {{- with .Values.apps.jupyterhub.proxy.secretSync.containerSecurityContext }}
+- securityContext:
+- {{- . | toYaml | nindent 12 }}
+- {{- end }}
+- containers:
+- - name: traefik
+- image: "{{ .Values.apps.jupyterhub.proxy.traefik.image.name }}:{{ .Values.apps.jupyterhub.proxy.traefik.image.tag }}"
+- {{- with .Values.apps.jupyterhub.proxy.traefik.image.pullPolicy }}
+- imagePullPolicy: {{ . }}
+- {{- end }}
+- {{- with .Values.apps.jupyterhub.proxy.traefik.resources }}
+- resources:
+- {{- . | toYaml | nindent 12 }}
+- {{- end }}
+- ports:
+- - name: http
+- containerPort: 8080
+- - name: https
+- containerPort: 8443
+- {{- with .Values.apps.jupyterhub.proxy.traefik.extraPorts }}
+- {{- . | toYaml | nindent 12 }}
+- {{- end }}
+- volumeMounts:
+- - name: traefik-config
+- mountPath: /etc/traefik
+- - name: certificates
+- mountPath: /etc/acme
+- {{- with .Values.apps.jupyterhub.proxy.traefik.extraVolumeMounts }}
+- {{- . | toYaml | nindent 12 }}
+- {{- end }}
+- {{- with .Values.apps.jupyterhub.proxy.traefik.extraEnv }}
+- env:
+- {{- include "jupyterhub.extraEnv" . | nindent 12 }}
+- {{- end }}
+- {{- with .Values.apps.jupyterhub.proxy.traefik.containerSecurityContext }}
+- securityContext:
+- {{- . | toYaml | nindent 12 }}
+- {{- end }}
+- - name: secret-sync
+- image: "{{ .Values.apps.jupyterhub.proxy.secretSync.image.name }}:{{ .Values.apps.jupyterhub.proxy.secretSync.image.tag }}"
+- {{- with .Values.apps.jupyterhub.proxy.secretSync.image.pullPolicy }}
+- imagePullPolicy: {{ . }}
+- {{- end }}
+- args:
+- - watch-save
+- - --label=app={{ include "jupyterhub.appLabel" . }}
+- - --label=release={{ .Release.Name }}
+- - --label=chart={{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+- - --label=heritage=secret-sync
+- - {{ include "jupyterhub.proxy-public-tls.fullname" . }}
+- - acme.json
+- - /etc/acme/acme.json
+- env:
+- # We need this to get logs immediately
+- - name: PYTHONUNBUFFERED
+- value: "True"
+- volumeMounts:
+- - name: certificates
+- mountPath: /etc/acme
+- {{- with .Values.apps.jupyterhub.proxy.secretSync.containerSecurityContext }}
+- securityContext:
+- {{- . | toYaml | nindent 12 }}
+- {{- end }}
+- {{- with .Values.apps.jupyterhub.proxy.traefik.extraPodSpec }}
+- {{- . | toYaml | nindent 6 }}
+- {{- end }}
+-{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml
+deleted file mode 100755
+index ea43b67..0000000
+--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml
++++ /dev/null
+@@ -1,40 +0,0 @@
+-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
+-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
+-{{- if (and $autoHTTPS .Values.apps.jupyterhub.rbac.enabled) -}}
+-apiVersion: rbac.authorization.k8s.io/v1
+-kind: Role
+-metadata:
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+- {{- with .Values.apps.jupyterhub.proxy.traefik.serviceAccount.annotations }}
+- annotations:
+- {{- . | toYaml | nindent 4 }}
+- {{- end }}
+-rules:
+-- apiGroups: [""]
+- resources: ["secrets"]
+- verbs: ["get", "patch", "list", "create"]
+----
+-apiVersion: rbac.authorization.k8s.io/v1
+-kind: RoleBinding
+-metadata:
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+-subjects:
+-- kind: ServiceAccount
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- apiGroup:
+-roleRef:
+- kind: Role
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- apiGroup: rbac.authorization.k8s.io
+----
+-apiVersion: v1
+-kind: ServiceAccount
+-metadata:
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+-{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml
+deleted file mode 100755
+index d57c135..0000000
+--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml
++++ /dev/null
+@@ -1,25 +0,0 @@
+-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
+-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
+-{{- if $autoHTTPS -}}
+-apiVersion: v1
+-kind: Service
+-metadata:
+- name: {{ include "jupyterhub.proxy-http.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+- {{- with .Values.apps.jupyterhub.proxy.service.labels }}
+- {{- . | toYaml | nindent 4 }}
+- {{- end }}
+- {{- with .Values.apps.jupyterhub.proxy.service.annotations }}
+- annotations:
+- {{- . | toYaml | nindent 4 }}
+- {{- end }}
+-spec:
+- type: ClusterIP
+- selector:
+- {{- $_ := merge (dict "componentLabel" "proxy") . }}
+- {{- include "jupyterhub.matchLabels" $_ | nindent 4 }}
+- ports:
+- - port: 8000
+- targetPort: http
+-{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/deployment.yaml b/applications/jupyterhub/deploy/templates/proxy/deployment.yaml
+index 6d63ba8..bb37b8f 100755
+--- a/applications/jupyterhub/deploy/templates/proxy/deployment.yaml
++++ b/applications/jupyterhub/deploy/templates/proxy/deployment.yaml
+@@ -7,6 +7,9 @@ metadata:
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ spec:
++ {{- if typeIs "int" .Values.apps.jupyterhub.proxy.chp.revisionHistoryLimit }}
++ revisionHistoryLimit: {{ .Values.apps.jupyterhub.proxy.chp.revisionHistoryLimit }}
++ {{- end }}
+ replicas: 1
+ selector:
+ matchLabels:
+@@ -35,7 +38,7 @@ spec:
+ # match the k8s Secret during the first upgrade following an auth_token
+ # was generated.
+ checksum/auth-token: {{ include "jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token" . | sha256sum | trunc 4 | quote }}
+- checksum/proxy-secret: {{ include (print $.Template.BasePath "/jupyterhub/hub/secret.yaml") . | sha256sum }}
++ checksum/proxy-secret: {{ include (print $.Template.BasePath "/jupyterhub/proxy/secret.yaml") . | sha256sum | quote }}
+ {{- with .Values.apps.jupyterhub.proxy.annotations }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+@@ -44,7 +47,10 @@ spec:
+ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
+ {{- end }}
+- nodeSelector: {{ toJson .Values.apps.jupyterhub.proxy.chp.nodeSelector }}
++ {{- with .Values.apps.jupyterhub.proxy.chp.nodeSelector }}
++ nodeSelector:
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.proxy.chp.tolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+@@ -135,6 +141,8 @@ spec:
+ livenessProbe:
+ initialDelaySeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.periodSeconds }}
++ timeoutSeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.timeoutSeconds }}
++ failureThreshold: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.failureThreshold }}
+ httpGet:
+ path: /_chp_healthz
+ {{- if or $manualHTTPS $manualHTTPSwithsecret }}
+@@ -149,6 +157,8 @@ spec:
+ readinessProbe:
+ initialDelaySeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.periodSeconds }}
++ timeoutSeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.timeoutSeconds }}
++ failureThreshold: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.failureThreshold }}
+ httpGet:
+ path: /_chp_healthz
+ {{- if or $manualHTTPS $manualHTTPSwithsecret }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/netpol.yaml b/applications/jupyterhub/deploy/templates/proxy/netpol.yaml
+index adc8277..88a00be 100755
+--- a/applications/jupyterhub/deploy/templates/proxy/netpol.yaml
++++ b/applications/jupyterhub/deploy/templates/proxy/netpol.yaml
+@@ -85,32 +85,24 @@ spec:
+
+ egress:
+ # proxy --> hub
+- - ports:
+- - port: 8081
+- to:
++ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "hub") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++ ports:
++ - port: 8081
+
+ # proxy --> singleuser-server
+- - ports:
+- - port: 8888
+- to:
++ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "singleuser-server") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++ ports:
++ - port: 8888
+
+- # proxy --> Kubernetes internal DNS
+- - ports:
+- - protocol: UDP
+- port: 53
+- - protocol: TCP
+- port: 53
+-
+- {{- with .Values.apps.jupyterhub.proxy.chp.networkPolicy.egress }}
+- # proxy --> depends, but the default is everything
+- {{- . | toYaml | nindent 4 }}
++ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.proxy.chp.networkPolicy)) }}
++ {{- . | nindent 4 }}
+ {{- end }}
+ {{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/pdb.yaml b/applications/jupyterhub/deploy/templates/proxy/pdb.yaml
+index 1846a3b..155895b 100755
+--- a/applications/jupyterhub/deploy/templates/proxy/pdb.yaml
++++ b/applications/jupyterhub/deploy/templates/proxy/pdb.yaml
+@@ -1,9 +1,5 @@
+ {{- if .Values.apps.jupyterhub.proxy.chp.pdb.enabled -}}
+-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
+ apiVersion: policy/v1
+-{{- else }}
+-apiVersion: policy/v1beta1
+-{{- end }}
+ kind: PodDisruptionBudget
+ metadata:
+ name: {{ include "jupyterhub.proxy.fullname" . }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/service.yaml b/applications/jupyterhub/deploy/templates/proxy/service.yaml
+index 0d9ca5b..f634ba9 100755
+--- a/applications/jupyterhub/deploy/templates/proxy/service.yaml
++++ b/applications/jupyterhub/deploy/templates/proxy/service.yaml
+@@ -35,12 +35,15 @@ metadata:
+ {{- end }}
+ spec:
+ selector:
++ # This service will target the autohttps pod if autohttps is configured, and
++ # the proxy pod if not. When autohttps is configured, the service proxy-http
++ # will be around to target the proxy pod directly.
+ {{- if $autoHTTPS }}
+- component: autohttps
++ {{- $_ := merge (dict "componentLabel" "autohttps") . -}}
++ {{- include "jupyterhub.matchLabels" $_ | nindent 4 }}
+ {{- else }}
+- component: proxy
++ {{- include "jupyterhub.matchLabels" . | nindent 4 }}
+ {{- end }}
+- release: {{ .Release.Name }}
+ ports:
+ {{- if $HTTPS }}
+ - name: https
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml b/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
+index 588cf19..1bed905 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
+@@ -4,22 +4,9 @@ kind: PriorityClass
+ metadata:
+ name: {{ include "jupyterhub.priority.fullname" . }}
+ annotations:
+- # FIXME: PriorityClasses must be added before the other resources reference
+- # them, and in the past a workaround was needed to accomplish this:
+- # to make the resource a Helm hook.
+- #
+- # To transition this resource to no longer be a Helm hook resource,
+- # we explicitly add ownership annotations/labels (in 1.0.0) which
+- # will allow a future upgrade (in 2.0.0) to remove all hook and
+- # ownership annotations/labels.
+- #
+- helm.sh/hook: pre-install,pre-upgrade
+- helm.sh/hook-delete-policy: before-hook-creation
+- helm.sh/hook-weight: "-100"
+ meta.helm.sh/release-name: "{{ .Release.Name }}"
+ meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
+ labels:
+- app.kubernetes.io/managed-by: Helm
+ {{- $_ := merge (dict "componentLabel" "default-priority") . }}
+ {{- include "jupyterhub.labels" $_ | nindent 4 }}
+ value: {{ .Values.apps.jupyterhub.scheduling.podPriority.defaultPriority }}
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
+index b1dc6c5..800ac20 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
+@@ -3,11 +3,7 @@ The cluster autoscaler should be allowed to evict and reschedule these pods if
+ it would help in order to scale down a node.
+ */}}
+ {{- if .Values.apps.jupyterhub.scheduling.userPlaceholder.enabled -}}
+-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
+ apiVersion: policy/v1
+-{{- else }}
+-apiVersion: policy/v1beta1
+-{{- end }}
+ kind: PodDisruptionBudget
+ metadata:
+ name: {{ include "jupyterhub.user-placeholder.fullname" . }}
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
+index e03497d..688e217 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
+@@ -5,22 +5,9 @@ kind: PriorityClass
+ metadata:
+ name: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
+ annotations:
+- # FIXME: PriorityClasses must be added before the other resources reference
+- # them, and in the past a workaround was needed to accomplish this:
+- # to make the resource a Helm hook.
+- #
+- # To transition this resource to no longer be a Helm hook resource,
+- # we explicitly add ownership annotations/labels (in 1.0.0) which
+- # will allow a future upgrade (in 2.0.0) to remove all hook and
+- # ownership annotations/labels.
+- #
+- helm.sh/hook: pre-install,pre-upgrade
+- helm.sh/hook-delete-policy: before-hook-creation
+- helm.sh/hook-weight: "-100"
+ meta.helm.sh/release-name: "{{ .Release.Name }}"
+ meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
+ labels:
+- app.kubernetes.io/managed-by: Helm
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ value: {{ .Values.apps.jupyterhub.scheduling.podPriority.userPlaceholderPriority }}
+ globalDefault: false
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
+index 114f626..c243bee 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
+@@ -16,6 +16,9 @@ metadata:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ spec:
+ podManagementPolicy: Parallel
++ {{- if typeIs "int" .Values.apps.jupyterhub.scheduling.userPlaceholder.revisionHistoryLimit }}
++ revisionHistoryLimit: {{ .Values.apps.jupyterhub.scheduling.userPlaceholder.revisionHistoryLimit }}
++ {{- end }}
+ replicas: {{ .Values.apps.jupyterhub.scheduling.userPlaceholder.replicas }}
+ selector:
+ matchLabels:
+@@ -23,9 +26,16 @@ spec:
+ serviceName: {{ include "jupyterhub.user-placeholder.fullname" . }}
+ template:
+ metadata:
++ {{- with .Values.apps.jupyterhub.scheduling.userPlaceholder.annotations }}
++ annotations:
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ labels:
+ {{- /* Changes here will cause the Deployment to restart the pods. */}}
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
++ {{- with .Values.apps.jupyterhub.scheduling.userPlaceholder.labels }}
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ spec:
+ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
+@@ -33,7 +43,10 @@ spec:
+ {{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled }}
+ schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
+ {{- end }}
+- nodeSelector: {{ toJson .Values.apps.jupyterhub.singleuser.nodeSelector }}
++ {{- with .Values.apps.jupyterhub.singleuser.nodeSelector }}
++ nodeSelector:
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ {{- with concat .Values.apps.jupyterhub.scheduling.userPods.tolerations .Values.apps.jupyterhub.singleuser.extraTolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
+index ef8a37f..3e83b44 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
+@@ -6,16 +6,28 @@ metadata:
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ data:
+- # ref: https://kubernetes.io/docs/reference/scheduling/config/
++ {{- /*
++ This is configuration of a k8s official kube-scheduler binary running in the
++ user-scheduler.
++
++ ref: https://kubernetes.io/docs/reference/scheduling/config/
++ ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1/
++ */}}
+ config.yaml: |
+- apiVersion: kubescheduler.config.k8s.io/v1beta1
++ apiVersion: kubescheduler.config.k8s.io/v1
+ kind: KubeSchedulerConfiguration
+ leaderElection:
+- resourceLock: endpoints
++ resourceLock: leases
+ resourceName: {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
+ resourceNamespace: "{{ .Release.Namespace }}"
+ profiles:
+ - schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
++ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.plugins }}
+ plugins:
+- {{- .Values.apps.jupyterhub.scheduling.userScheduler.plugins | toYaml | nindent 10 }}
++ {{- . | toYaml | nindent 10 }}
++ {{- end }}
++ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.pluginConfig }}
++ pluginConfig:
++ {{- . | toYaml | nindent 10 }}
++ {{- end }}
+ {{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
+index 1bcaf31..f22d0de 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
+@@ -6,6 +6,9 @@ metadata:
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ spec:
++ {{- if typeIs "int" .Values.apps.jupyterhub.scheduling.userScheduler.revisionHistoryLimit }}
++ revisionHistoryLimit: {{ .Values.apps.jupyterhub.scheduling.userScheduler.revisionHistoryLimit }}
++ {{- end }}
+ replicas: {{ .Values.apps.jupyterhub.scheduling.userScheduler.replicas }}
+ selector:
+ matchLabels:
+@@ -14,16 +17,25 @@ spec:
+ metadata:
+ labels:
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
++ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.labels }}
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ annotations:
+ checksum/config-map: {{ include (print $.Template.BasePath "/jupyterhub/scheduling/user-scheduler/configmap.yaml") . | sha256sum }}
++ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.annotations }}
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ spec:
+- {{- if .Values.apps.jupyterhub.rbac.enabled }}
+- serviceAccountName: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
++ {{- with include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
++ serviceAccountName: {{ . }}
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
+ {{- end }}
+- nodeSelector: {{ toJson .Values.apps.jupyterhub.scheduling.userScheduler.nodeSelector }}
++ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.nodeSelector }}
++ nodeSelector:
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.scheduling.userScheduler.tolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+@@ -44,13 +56,6 @@ spec:
+ {{- end }}
+ command:
+ - /usr/local/bin/kube-scheduler
+- # NOTE: --leader-elect-... (new) and --lock-object-... (deprecated)
+- # flags are silently ignored in favor of whats defined in the
+- # passed KubeSchedulerConfiguration whenever --config is
+- # passed.
+- #
+- # ref: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/
+- #
+ # NOTE: --authentication-skip-lookup=true is used to avoid a
+ # seemingly harmless error, if we need to not skip
+ # "authentication lookup" in the future, see the linked issue.
+@@ -65,12 +70,14 @@ spec:
+ livenessProbe:
+ httpGet:
+ path: /healthz
+- port: 10251
++ scheme: HTTPS
++ port: 10259
+ initialDelaySeconds: 15
+ readinessProbe:
+ httpGet:
+ path: /healthz
+- port: 10251
++ scheme: HTTPS
++ port: 10259
+ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
+index 04f2af8..2c9c6de 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
+@@ -1,9 +1,5 @@
+ {{- if and .Values.apps.jupyterhub.scheduling.userScheduler.enabled .Values.apps.jupyterhub.scheduling.userScheduler.pdb.enabled -}}
+-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
+ apiVersion: policy/v1
+-{{- else }}
+-apiVersion: policy/v1beta1
+-{{- end }}
+ kind: PodDisruptionBudget
+ metadata:
+ name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
+index 083e065..9c7fab7 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
+@@ -1,16 +1,5 @@
+ {{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled -}}
+-{{- if .Values.apps.jupyterhub.rbac.enabled }}
+-apiVersion: v1
+-kind: ServiceAccount
+-metadata:
+- name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+- {{- with .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.annotations }}
+- annotations:
+- {{- . | toYaml | nindent 4 }}
+- {{- end }}
+----
++{{- if .Values.apps.jupyterhub.rbac.create -}}
+ kind: ClusterRole
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+@@ -19,13 +8,23 @@ metadata:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ rules:
+ # Copied from the system:kube-scheduler ClusterRole of the k8s version
+- # matching the kube-scheduler binary we use. A modification of two resource
+- # name references from kube-scheduler to user-scheduler-lock was made.
++ # matching the kube-scheduler binary we use. A modification has been made to
++ # resourceName fields to remain relevant for how we have named our resources
++ # in this Helm chart.
+ #
+- # NOTE: These rules have been unchanged between 1.12 and 1.15, then changed in
+- # 1.16 and in 1.17, but unchanged in 1.18 and 1.19.
++ # NOTE: These rules have been:
++ # - unchanged between 1.12 and 1.15
++ # - changed in 1.16
++ # - changed in 1.17
++ # - unchanged between 1.18 and 1.20
++ # - changed in 1.21: get/list/watch permission for namespace,
++ # csidrivers, csistoragecapacities was added.
++ # - unchanged between 1.22 and 1.27
++ # - changed in 1.28: permissions to get/update lock endpoint resource
++ # removed
++ # - unchanged between 1.28 and 1.29
+ #
+- # ref: https://github.com/kubernetes/kubernetes/blob/v1.19.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L696-L829
++ # ref: https://github.com/kubernetes/kubernetes/blob/v1.29.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L721-L862
+ - apiGroups:
+ - ""
+ - events.k8s.io
+@@ -50,21 +49,6 @@ rules:
+ verbs:
+ - get
+ - update
+- - apiGroups:
+- - ""
+- resources:
+- - endpoints
+- verbs:
+- - create
+- - apiGroups:
+- - ""
+- resourceNames:
+- - {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
+- resources:
+- - endpoints
+- verbs:
+- - get
+- - update
+ - apiGroups:
+ - ""
+ resources:
+@@ -159,13 +143,37 @@ rules:
+ - get
+ - list
+ - watch
++ - apiGroups:
++ - ""
++ resources:
++ - namespaces
++ verbs:
++ - get
++ - list
++ - watch
++ - apiGroups:
++ - storage.k8s.io
++ resources:
++ - csidrivers
++ verbs:
++ - get
++ - list
++ - watch
++ - apiGroups:
++ - storage.k8s.io
++ resources:
++ - csistoragecapacities
++ verbs:
++ - get
++ - list
++ - watch
+
+ # Copied from the system:volume-scheduler ClusterRole of the k8s version
+ # matching the kube-scheduler binary we use.
+ #
+- # NOTE: These rules have not changed between 1.12 and 1.19.
++ # NOTE: These rules have not changed between 1.12 and 1.29.
+ #
+- # ref: https://github.com/kubernetes/kubernetes/blob/v1.19.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1213-L1240
++ # ref: https://github.com/kubernetes/kubernetes/blob/v1.29.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1283-L1310
+ - apiGroups:
+ - ""
+ resources:
+@@ -203,7 +211,7 @@ metadata:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ subjects:
+ - kind: ServiceAccount
+- name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
++ name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
+ namespace: "{{ .Release.Namespace }}"
+ roleRef:
+ kind: ClusterRole
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml
+new file mode 100644
+index 0000000..67618b0
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml
+@@ -0,0 +1,14 @@
++{{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled -}}
++{{- if .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.create -}}
++apiVersion: v1
++kind: ServiceAccount
++metadata:
++ name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
++ labels:
++ {{- include "jupyterhub.labels" . | nindent 4 }}
++ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.annotations }}
++ annotations:
++ {{- . | toYaml | nindent 4 }}
++ {{- end }}
++{{- end }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml b/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
+index 3dfb137..931a150 100755
+--- a/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
++++ b/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
+@@ -62,23 +62,38 @@ spec:
+
+ egress:
+ # singleuser-server --> hub
+- - ports:
+- - port: 8081
+- to:
++ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "hub") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++ ports:
++ - port: 8081
+
+- # singleuser-server --> Kubernetes internal DNS
+- - ports:
+- - protocol: UDP
+- port: 53
+- - protocol: TCP
+- port: 53
++ # singleuser-server --> proxy
++ # singleuser-server --> autohttps
++ #
++ # While not critical for core functionality, a user or library code may rely
++ # on communicating with the proxy or autohttps pods via a k8s Service it can
++ # detect from well-known environment variables.
++ #
++ - to:
++ - podSelector:
++ matchLabels:
++ {{- $_ := merge (dict "componentLabel" "proxy") . }}
++ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++ ports:
++ - port: 8000
++ - to:
++ - podSelector:
++ matchLabels:
++ {{- $_ := merge (dict "componentLabel" "autohttps") . }}
++ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++ ports:
++ - port: 8080
++ - port: 8443
+
+- {{- with .Values.apps.jupyterhub.singleuser.networkPolicy.egress }}
+- # singleuser-server --> depends, but the default is everything
+- {{- . | toYaml | nindent 4 }}
++ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.singleuser.networkPolicy)) }}
++ {{- . | nindent 4 }}
+ {{- end }}
+ {{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/singleuser/secret.yaml b/applications/jupyterhub/deploy/templates/singleuser/secret.yaml
+new file mode 100644
+index 0000000..e6eab9b
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/singleuser/secret.yaml
+@@ -0,0 +1,17 @@
++{{- if .Values.apps.jupyterhub.singleuser.extraFiles }}
++kind: Secret
++apiVersion: v1
++metadata:
++ name: {{ include "jupyterhub.singleuser.fullname" . }}
++ labels:
++ {{- include "jupyterhub.labels" . | nindent 4 }}
++type: Opaque
++{{- with include "jupyterhub.extraFiles.data" .Values.apps.jupyterhub.singleuser.extraFiles }}
++data:
++ {{- . | nindent 2 }}
++{{- end }}
++{{- with include "jupyterhub.extraFiles.stringData" .Values.apps.jupyterhub.singleuser.extraFiles }}
++stringData:
++ {{- . | nindent 2 }}
++{{- end }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/values.schema.yaml b/applications/jupyterhub/deploy/values.schema.yaml
+new file mode 100644
+index 0000000..69c13a8
+--- /dev/null
++++ b/applications/jupyterhub/deploy/values.schema.yaml
+@@ -0,0 +1,3014 @@
++# This schema (a jsonschema in YAML format) is used to generate
++# values.schema.json which is packaged with the Helm chart for client side
++# validation by helm of values before template rendering.
++#
++# This schema is also used by our documentation system to build the
++# configuration reference section based on the description fields. See
++# docs/source/conf.py for that logic!
++#
++# We look to document everything we have default values for in values.yaml, but
++# we don't look to enforce the perfect validation logic within this file.
++#
++# ref: https://json-schema.org/learn/getting-started-step-by-step.html
++#
++$schema: http://json-schema.org/draft-07/schema#
++type: object
++additionalProperties: false
++required:
++ - imagePullSecrets
++ - hub
++ - proxy
++ - singleuser
++ - ingress
++ - prePuller
++ - custom
++ - cull
++ - debug
++ - rbac
++ - global
++properties:
++ enabled:
++ type: [boolean, "null"]
++ description: |
++ `enabled` is ignored by the jupyterhub chart itself, but a chart depending
++ on the jupyterhub chart can use this config option as its condition.
++ fullnameOverride:
++ type: [string, "null"]
++ description: |
++ fullnameOverride and nameOverride allow you to adjust how the resources
++ that are part of the Helm chart are named.
++
++ Name format | Resource types | fullnameOverride | nameOverride | Note
++ ------------------------- | -------------- | ---------------- | ------------ | -
++ component | namespaced | `""` | * | Default
++ release-component | cluster wide | `""` | * | Default
++ fullname-component | * | str | * | -
++ release-component | * | null | `""` | -
++ release-(name-)component | * | null | str | omitted if contained in release
++ release-(chart-)component | * | null | null | omitted if contained in release
++
++ ```{admonition} Warning!
++ :class: warning
++ Changing fullnameOverride or nameOverride after the initial installation
++ of the chart isn't supported. Changing their values likely leads to a
++ reset of non-external JupyterHub databases, abandonment of users' storage,
++ and severed couplings to currently running user pods.
++ ```
++
++ If you are a developer of a chart depending on this chart, you should
++ avoid hardcoding names. If you want to reference the name of a resource in
++ this chart from a parent helm chart's template, you can make use of the
++ global named templates instead.
++
++ ```yaml
++ # some pod definition of a parent chart helm template
++ schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
++ ```
++
++ To access them from a container, you can also rely on the hub ConfigMap
++ that contains entries of all the resource names.
++
++ ```yaml
++ # some container definition in a parent chart helm template
++ env:
++ - name: SCHEDULER_NAME
++ valueFrom:
++ configMapKeyRef:
++ name: {{ include "jupyterhub.user-scheduler.fullname" . }}
++ key: user-scheduler
++ ```
++
++ nameOverride:
++ type: [string, "null"]
++ description: |
++ See the documentation under [`fullnameOverride`](schema_fullnameOverride).
++
++ imagePullSecret:
++ type: object
++ required: [create]
++ if:
++ properties:
++ create:
++ const: true
++ then:
++ additionalProperties: false
++ required: [registry, username, password]
++ description: |
++ This is configuration to create a k8s Secret resource of `type:
++ kubernetes.io/dockerconfigjson`, with credentials to pull images from a
++ private image registry. If you opt to do so, it will be available for use
++ by all pods in their respective `spec.imagePullSecrets` alongside other
++ k8s Secrets defined in `imagePullSecrets` or the pods' respective
++ `...image.pullSecrets` configuration.
++
++ In other words, using this configuration option can automate both the
++ otherwise manual creation of a k8s Secret and the otherwise manual
++ configuration to reference this k8s Secret in all the pods of the Helm
++ chart.
++
++ ```sh
++ # you won't need to create a k8s Secret manually...
++ kubectl create secret docker-registry image-pull-secret \
++ --docker-server=<REGISTRY> \
++ --docker-username=<USERNAME> \
++ --docker-email=<EMAIL> \
++ --docker-password=<PASSWORD>
++ ```
++
++ If you just want to let all Pods reference an existing secret, use the
++ [`imagePullSecrets`](schema_imagePullSecrets) configuration instead.
++ properties:
++ create:
++ type: boolean
++ description: |
++ Toggle the creation of the k8s Secret with provided credentials to
++ access a private image registry.
++ automaticReferenceInjection:
++ type: boolean
++ description: |
++ Toggle the automatic reference injection of the created Secret to all
++ pods' `spec.imagePullSecrets` configuration.
++ registry:
++ type: string
++ description: |
++ Name of the private registry you want to create a credential set for.
++ It will default to Docker Hub's image registry.
++
++ Examples:
++ - https://index.docker.io/v1/
++ - quay.io
++ - eu.gcr.io
++ - alexmorreale.privatereg.net
++ username:
++ type: string
++ description: |
++ Name of the user you want to use to connect to your private registry.
++
++ For external gcr.io, you will use the `_json_key`.
++
++ Examples:
++ - alexmorreale
++ - alex@pfc.com
++ - _json_key
++ password:
++ type: string
++ description: |
++ Password for the private image registry's user.
++
++ Examples:
++ - plaintextpassword
++ - abc123SECRETzyx098
++
++ For gcr.io registries the password will be a big JSON blob for a
++ Google cloud service account; it should look something like below.
++
++ ```yaml
++ password: |-
++ {
++ "type": "service_account",
++ "project_id": "jupyter-se",
++ "private_key_id": "f2ba09118a8d3123b3321bd9a7d6d0d9dc6fdb85",
++ ...
++ }
++ ```
++ email:
++ type: [string, "null"]
++ description: |
++ Specification of an email is most often not required, but it is
++ supported.
++
++ imagePullSecrets:
++ type: array
++ description: |
++ Chart wide configuration to _append_ k8s Secret references to all its
++ pods' `spec.imagePullSecrets` configuration.
++
++ This will not override or get overridden by pod specific configuration,
++ but instead augment the pod specific configuration.
++
++ You can use both the k8s native syntax, where each list element is like
++ `{"name": "my-secret-name"}`, or you can let list elements be strings
++ naming the secrets directly.
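++
++ As an example, both syntaxes below are accepted; the secret names are
++ hypothetical:
++
++ ```yaml
++ imagePullSecrets:
++   - name: my-k8s-secret-by-reference
++   - my-k8s-secret-by-name
++ ```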
++
++ hub:
++ type: object
++ additionalProperties: false
++ required: [baseUrl]
++ properties:
++ revisionHistoryLimit: &revisionHistoryLimit
++ type: [integer, "null"]
++ minimum: 0
++ description: |
++ Configures the resource's `spec.revisionHistoryLimit`. This is
++ available for Deployment, StatefulSet, and DaemonSet resources.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#revision-history-limit)
++ for more info.
++ config:
++ type: object
++ additionalProperties: true
++ description: |
++ JupyterHub and its components (authenticators, spawners, etc), are
++ Python classes that expose its configuration through
++ [_traitlets_](https://traitlets.readthedocs.io/en/stable/). With this
++ Helm chart configuration (`hub.config`), you can directly configure
++ the Python classes through _static_ YAML values. To _dynamically_ set
++ values, you need to use [`hub.extraConfig`](schema_hub.extraConfig)
++ instead.
++
++ ```{admonition} Currently intended only for auth config
++ :class: warning
++ This config _currently_ (0.11.0) only influences the software in the
++ `hub` Pod, but some Helm chart config options, such as
++ [`hub.baseUrl`](schema_hub.baseUrl), are used to set
++ `JupyterHub.base_url` in the `hub` Pod _and_ influence how other Helm
++ templates are rendered.
++
++ As we have not yet mapped out all the potential configuration
++ conflicts except for the authentication related configuration options,
++ please accept that using it for something else at this point can lead
++ to issues.
++ ```
++
++ __Example__
++
++ If you inspect documentation or some `jupyterhub_config.py` file and find
++ the following section:
++
++ ```python
++ c.JupyterHub.admin_access = true
++ c.JupyterHub.admin_users = ["jovyan1", "jovyan2"]
++ c.KubeSpawner.k8s_api_request_timeout = 10
++ c.GitHubOAuthenticator.allowed_organizations = ["jupyterhub"]
++ ```
++
++ Then, you would be able to represent it with this configuration like:
++
++ ```yaml
++ hub:
++ config:
++ JupyterHub:
++ admin_access: true
++ admin_users:
++ - jovyan1
++ - jovyan2
++ KubeSpawner:
++ k8s_api_request_timeout: 10
++ GitHubOAuthenticator:
++ allowed_organizations:
++ - jupyterhub
++ ```
++
++ ```{admonition} YAML limitations
++ :class: tip
++ You can't represent Python `Bytes` or `Set` objects in YAML directly.
++ ```
++
++ ```{admonition} Helm value merging
++ :class: tip
++ `helm` merges a Helm chart's default values with values passed with
++ the `--values` or `-f` flag. During merging, lists are replaced while
++ dictionaries are updated.
++ ```
++ extraFiles: &extraFiles
++ type: object
++ additionalProperties: false
++ description: |
++ A dictionary with extra files to be injected into the pod's container
++ on startup. This can for example be used to inject: configuration
++ files, custom user interface templates, images, and more.
++
++ ```yaml
++ # NOTE: "hub" is used in this example, but the configuration is the
++ # same for "singleuser".
++ hub:
++ extraFiles:
++ # The file key is just a reference that doesn't influence the
++ # actual file name.
++ <file key>:
++ # mountPath is required and must be the absolute file path.
++ mountPath: <full file path>
++
++ # Choose one out of the three ways to represent the actual file
++ # content: data, stringData, or binaryData.
++ #
++ # data should be set to a mapping (dictionary). It will in the
++ # end be rendered to either YAML, JSON, or TOML based on the
++ # filename extension, which is required to be .yaml, .yml,
++ # .json, or .toml.
++ #
++ # If your content is YAML, JSON, or TOML, it can make sense to
++ # use data to represent it over stringData as data can be merged
++ # instead of replaced if set partially from separate Helm
++ # configuration files.
++ #
++ # Both stringData and binaryData should be set to a string
++ # representing the content, where binaryData should be the
++ # base64 encoding of the actual file content.
++ #
++ data:
++ myConfig:
++ myMap:
++ number: 123
++ string: "hi"
++ myList:
++ - 1
++ - 2
++ stringData: |
++ hello world!
++ binaryData: aGVsbG8gd29ybGQhCg==
++
++ # mode is by default 0644 and you can optionally override it
++ # either by octal notation (example: 0400) or decimal notation
++ # (example: 256).
++ mode: <file system permissions>
++ ```
++
++ **Using --set-file**
++
++ To avoid embedding entire files in the Helm chart configuration, you
++ can use the `--set-file` flag during `helm upgrade` to set the
++ stringData or binaryData field.
++
++ ```yaml
++ hub:
++ extraFiles:
++ my_image:
++ mountPath: /usr/local/share/jupyterhub/static/my_image.png
++
++ # Files in /usr/local/etc/jupyterhub/jupyterhub_config.d are
++ # automatically loaded in alphabetical order of the final file
++ # name when JupyterHub starts.
++ my_config:
++ mountPath: /usr/local/etc/jupyterhub/jupyterhub_config.d/my_jupyterhub_config.py
++ ```
++
++ ```bash
++ # --set-file expects a text based file, so you need to base64 encode
++ # it manually first.
++ base64 my_image.png > my_image.png.b64
++
++ helm upgrade <...> \
++ --set-file hub.extraFiles.my_image.binaryData=./my_image.png.b64 \
++ --set-file hub.extraFiles.my_config.stringData=./my_jupyterhub_config.py
++ ```
++
++ **Common uses**
++
++ 1. **JupyterHub template customization**
++
++ You can replace the default JupyterHub user interface templates in
++ the hub pod by injecting new ones to
++ `/usr/local/share/jupyterhub/templates`. These can in turn
++ reference custom images injected to
++ `/usr/local/share/jupyterhub/static`.
++
++ 1. **JupyterHub standalone file config**
++
++ Instead of embedding JupyterHub python configuration as a string
++ within a YAML file through
++ [`hub.extraConfig`](schema_hub.extraConfig), you can inject a
++ standalone .py file into
++ `/usr/local/etc/jupyterhub/jupyterhub_config.d` that is
++ automatically loaded.
++
++ 1. **Flexible configuration**
++
++ By injecting files, you don't have to embed them in a docker image
++ that you have to rebuild.
++
++ If your configuration file is a YAML/JSON/TOML file, you can also
++ use `data` instead of `stringData`, which allows you to set various
++ configuration in separate Helm config files. This can be useful to
++ help dependent charts override only some configuration part of the
++ file, or to allow for the configuration be set through multiple
++ Helm configuration files.
++
++ **Limitations**
++
++ 1. File size
++
++ The files in `hub.extraFiles` and `singleuser.extraFiles` are
++ respectively stored in their own k8s Secret resource. As k8s
++ Secrets are limited in size, typically to 1MB, you will be limited to a
++ total file size of less than 1MB, as the base64 encoding that takes place
++ also reduces the available capacity to 75%.
++
++ 2. File updates
++
++ The files that are mounted are only set during container startup.
++ This is [because we use
++ `subPath`](https://kubernetes.io/docs/concepts/storage/volumes/#secret)
++ as is required to avoid replacing the content of the entire
++ directory we mount in.
++ patternProperties:
++ ".*":
++ type: object
++ additionalProperties: false
++ required: [mountPath]
++ oneOf:
++ - required: [data]
++ - required: [stringData]
++ - required: [binaryData]
++ properties:
++ mountPath:
++ type: string
++ data:
++ type: object
++ additionalProperties: true
++ stringData:
++ type: string
++ binaryData:
++ type: string
++ mode:
++ type: number
++ baseUrl:
++ type: string
++ description: |
++ This is the equivalent of c.JupyterHub.base_url, but it is also needed
++ by the Helm chart in general. So, instead of setting
++ c.JupyterHub.base_url, use this configuration.
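++
++ For example, serving JupyterHub under a URL prefix (the value below is
++ only an illustration) would look like:
++
++ ```yaml
++ hub:
++   baseUrl: /jupyterhub
++ ```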
++ command:
++ type: array
++ description: |
++ A list of strings to be used to replace the JupyterHub image's
++ `ENTRYPOINT` entry. Note that in k8s lingo, the Dockerfile's
++ `ENTRYPOINT` is called `command`. The list of strings will be expanded
++ with Helm's template function `tpl` which can render Helm template
++ logic inside curly braces (`{{... }}`).
++
++ This could be useful to wrap the invocation of JupyterHub itself in
++ some custom way.
++
++ For more details, see the [Kubernetes
++ documentation](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/).
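++
++ As a minimal sketch, assuming a custom hub image that bundles a
++ hypothetical wrapper script, this could look like:
++
++ ```yaml
++ hub:
++   command:
++     # hypothetical wrapper script baked into a custom hub image
++     - /usr/local/bin/my-wrapper.sh
++     - jupyterhub
++ ```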
++ args:
++ type: array
++ description: |
++ A list of strings to be used to replace the JupyterHub image's `CMD`
++ entry as well as the Helm chart's default way to start JupyterHub.
++ Note that in k8s lingo, the Dockerfile's `CMD` is called `args`. The
++ list of strings will be expanded with Helm's template function `tpl`
++ which can render Helm template logic inside curly braces (`{{... }}`).
++
++ ```{warning}
++ By replacing the entire configuration file, which is mounted to
++ `/usr/local/etc/jupyterhub/jupyterhub_config.py` by the Helm chart,
++ instead of appending to it with `hub.extraConfig`, you expose your
++ deployment to issues stemming from getting out of sync with the Helm
++ chart's config file.
++
++ These kinds of issues will be significantly harder to debug and
++ diagnose, and can therefore cause a lot of time expenditure for both
++ the community maintaining the Helm chart and yourself, even if this
++ wasn't the reason for the issue.
++
++ Due to this, we ask that you do your _absolute best_ to avoid replacing
++ the default provided `jupyterhub_config.py` file. It is often possible.
++ For example, if your goal is to have a dedicated .py file for more
++ extensive additions that you can syntax highlight, and you feel limited
++ by passing code in `hub.extraConfig` which is part of a YAML file, you
++ can use [this
++ trick](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues/1580#issuecomment-707776237)
++ instead.
++ ```
++
++ ```yaml
++ hub:
++ args:
++ - "jupyterhub"
++ - "--config"
++ - "/usr/local/etc/jupyterhub/jupyterhub_config.py"
++ - "--debug"
++ - "--upgrade-db"
++ ```
++
++ For more details, see the [Kubernetes
++ documentation](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/).
++ cookieSecret:
++ type: [string, "null"]
++ description: |
++ ```{note}
++ As of version 1.0.0 this will automatically be generated and there is
++ no need to set it manually.
++
++ If you wish to reset a generated key, you can use `kubectl edit` on
++ the k8s Secret typically named `hub` and remove the
++ `hub.config.JupyterHub.cookie_secret` entry in the k8s Secret, then
++ perform a new `helm upgrade`.
++ ```
++
++ A 32-byte cryptographically secure randomly generated string used to sign values of
++ secure cookies set by the hub. If unset, jupyterhub will generate one on startup and
++ save it in the file `jupyterhub_cookie_secret` in the `/srv/jupyterhub` directory of
++ the hub container. A value set here will make JupyterHub overwrite any previous file.
++
++ You do not need to set this at all if you are using the default configuration for
++ storing databases - sqlite on a persistent volume (with `hub.db.type` set to the
++ default `sqlite-pvc`). If you are using an external database, then you must set this
++ value explicitly - or your users will keep getting logged out each time the hub pod
++ restarts.
++
++ Changing this value will cause all user logins to be invalidated. If this
++ secret leaks, *immediately* change it to something else, or user data can
++ be compromised.
++
++ ```sh
++ # to generate a value, run
++ openssl rand -hex 32
++ ```
++ image: &image-spec
++ type: object
++ additionalProperties: false
++ required: [name, tag]
++ description: |
++ Set custom image name, tag, pullPolicy, or pullSecrets for the pod.
++ properties:
++ name:
++ type: string
++ description: |
++ The name of the image, without the tag.
++
++ ```
++ # example name
++ gcr.io/my-project/my-image
++ ```
++ tag:
++ type: string
++ description: |
++ The tag of the image to pull. This is the value following `:` in
++ complete image specifications.
++
++ ```
++ # example tags
++ v1.11.1
++ zhy270a
++ ```
++ pullPolicy:
++ enum: [null, "", IfNotPresent, Always, Never]
++ description: |
++ Configures the Pod's `spec.imagePullPolicy`.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)
++ for more info.
++ pullSecrets:
++ type: array
++ description: |
++ A list of references to existing Kubernetes Secrets with
++ credentials to pull the image.
++
++ This Pod's final `imagePullSecrets` k8s specification will be a
++ combination of:
++
++ 1. This list of k8s Secrets, specific for this pod.
++ 2. The list of k8s Secrets, for use by all pods in the Helm chart,
++ declared in this Helm chart's configuration called
++ `imagePullSecrets`.
++ 3. A k8s Secret, for use by all pods in the Helm chart, if
++ conditionally created from image registry credentials provided
++ under `imagePullSecret` if `imagePullSecret.create` is set to
++ true.
++
++ ```yaml
++ # example - k8s native syntax
++ pullSecrets:
++ - name: my-k8s-secret-with-image-registry-credentials
++
++ # example - simplified syntax
++ pullSecrets:
++ - my-k8s-secret-with-image-registry-credentials
++ ```
++ networkPolicy: &networkPolicy-spec
++ type: object
++ additionalProperties: false
++ description: |
++ This configuration regards the creation and configuration of a k8s
++ _NetworkPolicy resource_.
++ properties:
++ enabled:
++ type: boolean
++ description: |
++ Toggle the creation of the NetworkPolicy resource targeting this
++ pod, and by doing so, restricting its communication to only what
++ is explicitly allowed in the NetworkPolicy.
++ ingress:
++ type: array
++ description: |
++ Additional ingress rules to add besides those that are required
++ for core functionality.
++ egress:
++ type: array
++ description: |
++ Additional egress rules to add besides those that are required for
++ core functionality and those added via
++ [`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules).
++
++ ```{versionchanged} 2.0.0
++ The default value changed from providing one very permissive rule
++ allowing all egress to providing no rule. The permissive rule is
++ still provided via
++ [`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules)
++ set to true though.
++ ```
++
++ As an example, below is a configuration that disables the more
++ broadly permissive `.privateIPs` egress allow rule for the hub
++ pod, and instead provides tightly scoped permissions to access a
++ specific k8s local service as identified by pod labels.
++
++ ```yaml
++ hub:
++ networkPolicy:
++ egressAllowRules:
++ privateIPs: false
++ egress:
++ - to:
++ - podSelector:
++ matchLabels:
++ app: my-k8s-local-service
++ ports:
++ - protocol: TCP
++ port: 5978
++ ```
++ egressAllowRules:
++ type: object
++ additionalProperties: false
++ description: |
++ This is a set of predefined rules that when enabled will be added
++ to the NetworkPolicy list of egress rules.
++
++ The resulting egress rules will be a composition of:
++ - rules specific for the respective pod(s) function within the
++ Helm chart
++ - rules based on enabled `egressAllowRules` flags
++ - rules explicitly specified by the user
++
++ ```{note}
++ Each flag under this configuration will not render into a
++ dedicated rule in the NetworkPolicy resource, but will instead be
++ combined with the other flags into a reduced set of rules to avoid a
++ performance penalty.
++ ```
++
++ ```{versionadded} 2.0.0
++ ```
++ properties:
++ cloudMetadataServer:
++ type: boolean
++ description: |
++ Defaults to `false` for singleuser servers, but to `true` for
++ all other network policies.
++
++ When enabled this rule allows the respective pod(s) to
++ establish outbound connections to the cloud metadata server.
++
++ Note that the `nonPrivateIPs` rule allows all non-private
++ IP ranges but makes an exception for the cloud metadata
++ server, leaving this as the definitive configuration to allow
++ access to the cloud metadata server.
++
++ ```{versionchanged} 3.0.0
++ This configuration is not allowed to be configured true at the
++ same time as
++ [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)
++ to avoid an ambiguous configuration.
++ ```
++ dnsPortsCloudMetadataServer:
++ type: boolean
++ description: |
++ Defaults to `true` for all network policies.
++
++ When enabled this rule allows the respective pod(s) to
++ establish outbound connections to the cloud metadata server
++ via port 53.
++
++ Relying on this rule for the singleuser config should go hand
++ in hand with disabling
++ [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)
++ to avoid an ambiguous configuration.
++
++ Known situations when this rule can be relevant:
++
++ - In GKE clusters with Cloud DNS that is reached at the
++ cloud metadata server's non-private IP.
++
++ ```{note}
++ This chart doesn't know how to identify the DNS server that
++ pods will rely on due to variations between how k8s clusters
++ have been setup. Due to that, multiple rules are enabled by
++ default to ensure DNS connectivity.
++ ```
++
++ ```{versionadded} 3.0.0
++ ```
++ dnsPortsKubeSystemNamespace:
++ type: boolean
++ description: |
++ Defaults to `true` for all network policies.
++
++ When enabled this rule allows the respective pod(s) to
++ establish outbound connections to pods in the kube-system
++ namespace via port 53.
++
++ Known situations when this rule can be relevant:
++
++ - GKE, EKS, AKS, and other clusters relying directly on
++ `kube-dns` or `coredns` pods in the `kube-system` namespace.
++
++ ```{note}
++ This chart doesn't know how to identify the DNS server that
++ pods will rely on due to variations between how k8s clusters
++ have been set up. Due to that, multiple rules are enabled by
++ default to ensure DNS connectivity.
++ ```
++
++ ```{versionadded} 3.0.0
++ ```
++ dnsPortsPrivateIPs:
++ type: boolean
++ description: |
++ Defaults to `true` for all network policies.
++
++ When enabled this rule allows the respective pod(s) to
++ establish outbound connections to private IPs via port 53.
++
++ Known situations when this rule can be relevant:
++
++ - GKE clusters relying on a DNS server indirectly via a node
++ local DNS cache at an unknown private IP.
++
++ ```{note}
++ This chart doesn't know how to identify the DNS server that
++ pods will rely on due to variations between how k8s clusters
++ have been set up. Due to that, multiple rules are enabled by
++ default to ensure DNS connectivity.
++ ```
++
++ ```{warning}
++ This rule is not expected to work in clusters relying on
++ Cilium to enforce the NetworkPolicy rules (includes GKE
++ clusters with Dataplane v2), this is due to a [known
++ limitation](https://github.com/cilium/cilium/issues/9209).
++ ```
++ nonPrivateIPs:
++ type: boolean
++ description: |
++ Defaults to `true` for all network policies.
++
++ When enabled this rule allows the respective pod(s) to
++ establish outbound connections to the non-private IP ranges
++ with the exception of the cloud metadata server. This means
++ respective pod(s) can establish connections to the internet
++ but not (say) an unsecured prometheus server running in the
++ same cluster.
++ privateIPs:
++ type: boolean
++ description: |
++ Defaults to `false` for singleuser servers, but to `true` for
++ all other network policies.
++
++ Private IPs refer to the IP ranges `10.0.0.0/8`,
++ `172.16.0.0/12`, `192.168.0.0/16`.
++
++ When enabled this rule allows the respective pod(s) to
++ establish outbound connections to the private IP ranges, and
++ by extension to services internal to the k8s cluster.
++
++ Since not all workloads in the k8s cluster may have
++ NetworkPolicies setup to restrict their incoming connections,
++ having this set to false can be a good defense against
++ malicious intent from someone in control of software in these
++ pods.
++
++ If possible, try to avoid setting this to true as it gives
++ broad permissions that could be specified more directly via
++ the [`.egress`](schema_singleuser.networkPolicy.egress).
++
++ ```{warning}
++ This rule is not expected to work in clusters relying on
++ Cilium to enforce the NetworkPolicy rules (includes GKE
++ clusters with Dataplane v2), this is due to a [known
++ limitation](https://github.com/cilium/cilium/issues/9209).
++ ```
++ interNamespaceAccessLabels:
++ enum: [accept, ignore]
++ description: |
++ This configuration option determines if namespaces and pods in
++ other namespaces that have specific access labels should be
++ accepted to allow ingress (set to `accept`), or if the labels are
++ to be ignored when applied outside the local namespace (set to
++ `ignore`).
++
++ The available access labels for respective NetworkPolicy resources
++ are:
++
++ - `hub.jupyter.org/network-access-hub: "true"` (hub)
++ - `hub.jupyter.org/network-access-proxy-http: "true"` (proxy.chp, proxy.traefik)
++ - `hub.jupyter.org/network-access-proxy-api: "true"` (proxy.chp)
++ - `hub.jupyter.org/network-access-singleuser: "true"` (singleuser)
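++
++ For example, with this set to `accept`, a pod in another namespace can
++ be allowed ingress to the hub by labeling it as in this sketch (the pod
++ name is hypothetical):
++
++ ```yaml
++ apiVersion: v1
++ kind: Pod
++ metadata:
++   name: my-client-pod
++   labels:
++     hub.jupyter.org/network-access-hub: "true"
++ ```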
++ allowedIngressPorts:
++ type: array
++ description: |
++ A rule to allow ingress on these ports will be added no matter
++ what the origin of the request is. The default setting for
++ `proxy.chp` and `proxy.traefik`'s networkPolicy configuration is
++ `[http, https]`, while it is `[]` for other networkPolicies.
++
++ Note that these port names or numbers target a Pod's port name or
++ number, not a k8s Service's port name or number.
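++
++ As a minimal sketch, explicitly setting the documented default for the
++ chp proxy would look like:
++
++ ```yaml
++ proxy:
++   chp:
++     networkPolicy:
++       allowedIngressPorts: [http, https]
++ ```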
++ db:
++ type: object
++ additionalProperties: false
++ properties:
++ type:
++ enum: [sqlite-pvc, sqlite-memory, mysql, postgres, other]
++ description: |
++ Type of database backend to use for the hub database.
++
++ The Hub requires a persistent database to function, and this lets you specify
++ where it should be stored.
++
++ The various options are:
++
++ 1. **sqlite-pvc**
++
++ Use an `sqlite` database kept on a persistent volume attached to the hub.
++
++ By default, this disk is created by the cloud provider using
++ *dynamic provisioning* configured by a [storage
++ class](https://kubernetes.io/docs/concepts/storage/storage-classes/).
++ You can customize how this disk is created / attached by
++ setting various properties under `hub.db.pvc`.
++
++ This is the default setting, and should work well for most cloud provider
++ deployments.
++
++ 2. **sqlite-memory**
++
++ Use an in-memory `sqlite` database. This should only be used for testing,
++ since the database is erased whenever the hub pod restarts - causing the hub
++ to lose all memory of users who had logged in before.
++
++ When using this for testing, make sure you delete all other objects that the
++ hub has created (such as user pods, user PVCs, etc) every time the hub restarts.
++ Otherwise you might run into errors about duplicate resources.
++
++ 3. **mysql**
++
++ Use an externally hosted mysql database.
++
++ You have to specify an sqlalchemy connection string for the mysql database you
++ want to connect to in `hub.db.url` if using this option.
++
++ The general format of the connection string is:
++ ```
++ mysql+pymysql://<db-username>:<db-password>@<db-hostname>:<db-port>/<db-name>
++ ```
++
++ The user specified in the connection string must have the rights to create
++ tables in the database specified.
++
++ 4. **postgres**
++
++ Use an externally hosted postgres database.
++
++ You have to specify an sqlalchemy connection string for the postgres database you
++ want to connect to in `hub.db.url` if using this option.
++
++ The general format of the connection string is:
++ ```
++ postgresql+psycopg2://<db-username>:<db-password>@<db-hostname>:<db-port>/<db-name>
++ ```
++
++ The user specified in the connection string must have the rights to create
++ tables in the database specified.
++
++ 5. **other**
++
++ Use an externally hosted database of some kind other than mysql
++ or postgres.
++
++ When using _other_, the database password must be passed as
++ part of [hub.db.url](schema_hub.db.url) as
++ [hub.db.password](schema_hub.db.password) will be ignored.
++ pvc:
++ type: object
++ additionalProperties: false
++ required: [storage]
++ description: |
++ Customize the Persistent Volume Claim used when `hub.db.type` is `sqlite-pvc`.
++ properties:
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: &labels-and-annotations-patternProperties
++ ".*":
++ type: string
++ description: |
++ Annotations to apply to the PVC containing the sqlite database.
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
++ for more details about annotations.
++ selector:
++ type: object
++ additionalProperties: true
++ description: |
++ Label selectors to set for the PVC containing the sqlite database.
++
++ Useful when you are using a specific PV, and want to bind to
++ that and only that.
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
++ for more details about using a label selector for what PV to
++ bind to.
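++
++ A minimal sketch, assuming a PV carrying a hypothetical label:
++
++ ```yaml
++ hub:
++   db:
++     pvc:
++       selector:
++         matchLabels:
++           content: hub-db
++ ```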
++ storage:
++ type: string
++ description: |
++ Size of disk to request for the database disk.
++ accessModes:
++ type: array
++ items:
++ type: [string, "null"]
++ description: |
++ AccessModes contains the desired access modes the volume
++ should have. See [the k8s
++ documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1)
++ for more information.
++ storageClassName:
++ type: [string, "null"]
++ description: |
++ Name of the StorageClass required by the claim.
++
++ If this is a blank string it will be set to a blank string,
++ while if it is null, it will not be set at all.
++ subPath:
++ type: [string, "null"]
++ description: |
++ Path within the volume from which the container's volume
++ should be mounted. Defaults to "" (volume's root).
++ upgrade:
++ type: [boolean, "null"]
++ description: |
++ Users with external databases need to opt-in for upgrades of the
++ JupyterHub specific database schema if needed as part of a
++ JupyterHub version upgrade.
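++
++ For example, users of an external database can opt in to schema
++ upgrades with:
++
++ ```yaml
++ hub:
++   db:
++     upgrade: true
++ ```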
++ url:
++ type: [string, "null"]
++ description: |
++ Connection string when `hub.db.type` is mysql or postgres.
++
++ See documentation for `hub.db.type` for more details on the format of this property.
++ password:
++ type: [string, "null"]
++ description: |
++ Password for the database when `hub.db.type` is mysql or postgres.
++ labels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Extra labels to add to the hub pod.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++ to learn more about labels.
++ initContainers:
++ type: array
++ description: |
++ A list of initContainers to be run in the hub pod. See the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/).
++
++ ```yaml
++ hub:
++ initContainers:
++ - name: init-myservice
++ image: busybox:1.28
++ command: ['sh', '-c', 'command1']
++ - name: init-mydb
++ image: busybox:1.28
++ command: ['sh', '-c', 'command2']
++ ```
++ extraEnv:
++ type: [object, array]
++ additionalProperties: true
++ description: |
++ Extra environment variables that should be set for the hub pod.
++
++ Environment variables are usually used to:
++ - Pass parameters to some custom code in `hub.extraConfig`.
++ - Configure code running in the hub pod, such as an authenticator or
++ spawner.
++
++ String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which
++ is a part of Kubernetes.
++
++ ```yaml
++ hub:
++ extraEnv:
++ # basic notation (for literal values only)
++ MY_ENV_VARS_NAME1: "my env var value 1"
++
++ # explicit notation (the "name" field takes precedence)
++ HUB_NAMESPACE:
++ name: HUB_NAMESPACE
++ valueFrom:
++ fieldRef:
++ fieldPath: metadata.namespace
++
++ # implicit notation (the "name" field is implied)
++ PREFIXED_HUB_NAMESPACE:
++ value: "my-prefix-$(HUB_NAMESPACE)"
++ SECRET_VALUE:
++ valueFrom:
++ secretKeyRef:
++ name: my-k8s-secret
++ key: password
++ ```
++
++ For more information, see the [Kubernetes EnvVar
++ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
++ extraConfig:
++ type: object
++ additionalProperties: true
++ description: |
++ Arbitrary extra python based configuration that should be in `jupyterhub_config.py`.
++
++ This is the *escape hatch* - if you want to configure JupyterHub to do something specific
++ that is not present here as an option, you can write the raw Python to do it here.
++
++ extraConfig is a *dict*, so there can be multiple configuration
++ snippets under different names. The configuration sections are run in
++ alphabetical order based on the keys.
++
++ Non-exhaustive examples of things you can do here:
++ - Subclass authenticator / spawner to do a custom thing
++ - Dynamically launch different images for different sets of users
++ - Inject an auth token from GitHub authenticator into user pod
++ - Anything else you can think of!
++
++ Since this is usually a multi-line string, you want to format it using YAML's
++ [| operator](https://yaml.org/spec/1.2.2/#23-scalars).
++
++ For example:
++
++ ```yaml
++ hub:
++ extraConfig:
++ myConfig.py: |
++ c.JupyterHub.something = 'something'
++ c.Spawner.something_else = 'something else'
++ ```
++
++ ```{note}
++ No code validation is performed until JupyterHub loads it! If you make
++ a typo here, it will probably manifest itself as the hub pod failing
++ to start up and instead entering an `Error` state or the subsequent
++ `CrashLoopBackoff` state.
++
++ To make use of your own linters etc., it would be useful not to
++ embed Python code inside a YAML file. To do that, consider using
++ [`hub.extraFiles`](schema_hub.extraFiles) and mounting a file to
++ `/usr/local/etc/jupyterhub/jupyterhub_config.d` in order to load your
++ extra configuration logic.
++ ```
++
++ fsGid:
++ type: [integer, "null"]
++ minimum: 0
++ # This schema entry is needed to help us print a more helpful error
++ # message in NOTES.txt if hub.fsGid is set.
++ #
++ description: |
++ ```{note}
++ Removed in version 2.0.0. Use
++ [`hub.podSecurityContext`](schema_hub.podSecurityContext) and specify
++ `fsGroup` instead.
++ ```
++ service:
++ type: object
++ additionalProperties: false
++ description: |
++ Object to configure the Kubernetes Service through which JupyterHub will be exposed.
++ properties:
++ type:
++ enum: [ClusterIP, NodePort, LoadBalancer, ExternalName]
++ description: |
++ The Kubernetes ServiceType to be used.
++
++ The default type is `ClusterIP`.
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)
++ to learn more about service types.
++ ports:
++ type: object
++ additionalProperties: false
++ description: |
++ Object to configure the ports the hub service will be deployed on.
++ properties:
++ nodePort:
++ type: [integer, "null"]
++ minimum: 0
++ description: |
++ The nodePort to deploy the hub service on.
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Kubernetes annotations to apply to the hub service.
++ extraPorts:
++ type: array
++ description: |
++ Extra ports to add to the Hub Service object besides `hub` / `8081`.
++ This should be an array that includes `name`, `port`, and `targetPort`.
++ See [Multi-port Services](https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services) for more details.
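++
++ As an illustrative sketch (the port name and numbers are placeholders,
++ not defaults):
++
++ ```yaml
++ hub:
++   service:
++     extraPorts:
++       - name: my-extra-port
++         port: 8282
++         targetPort: 8282
++ ```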
++ loadBalancerIP:
++ type: [string, "null"]
++ description: |
++ A public IP address the hub Kubernetes service should be exposed
++ on. Exposing the hub directly is not recommended. Instead, route
++ traffic through the proxy-public service towards the hub.
++
++ pdb: &pdb-spec
++ type: object
++ additionalProperties: false
++ description: |
++ Configure a PodDisruptionBudget for this Deployment.
++
++ These are disabled by default for our deployments that don't support
++ being run in parallel with multiple replicas. Only the user-scheduler
++ currently supports being run in parallel with multiple replicas. If
++ they are enabled for a Deployment with only one replica, they will
++ block `kubectl drain` of a node for example.
++
++ Note that if you aim to block scaling down a node with the
++ hub/proxy/autohttps pod that would cause disruptions of the
++ deployment, then you should instead annotate the pods of the
++ Deployment [as described
++ here](https://github.com/kubernetes/autoscaler/blob/HEAD/cluster-autoscaler/FAQ.md#what-types-of-pods-can-prevent-ca-from-removing-a-node).
++
++ "cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/)
++ for more details about disruptions.
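++
++ As a sketch, enabling a PodDisruptionBudget for the user-scheduler, the
++ only deployment here that supports multiple replicas (the number is
++ illustrative):
++
++ ```yaml
++ scheduling:
++   userScheduler:
++     pdb:
++       enabled: true
++       maxUnavailable: 1
++ ```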
++ properties:
++ enabled:
++ type: boolean
++ description: |
++ Decides if a PodDisruptionBudget is created targeting the
++ Deployment's pods.
++ maxUnavailable:
++ type: [integer, "null"]
++ description: |
++ The maximum number of pods that can be unavailable during
++ voluntary disruptions.
++ minAvailable:
++ type: [integer, "null"]
++ description: |
++ The minimum number of pods required to be available during
++ voluntary disruptions.
++ existingSecret:
++ type: [string, "null"]
++ description: |
++ This option allows you to provide the name of an existing k8s Secret to
++ use alongside the chart managed k8s Secret. The content of this k8s
++ Secret will be merged with the chart managed k8s Secret, giving
++ priority to the self-managed k8s Secret.
++
++ ```{warning}
++ 1. The self managed k8s Secret must mirror the structure in the chart
++ managed secret.
++ 2. [`proxy.secretToken`](schema_proxy.secretToken) (aka.
++ `hub.config.ConfigurableHTTPProxy.auth_token`) is only read from
++ the chart managed k8s Secret.
++ ```
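++
++ A minimal sketch of referencing such a Secret (the name is a
++ placeholder):
++
++ ```yaml
++ hub:
++   existingSecret: my-existing-secret
++ ```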
++ nodeSelector: &nodeSelector-spec
++ type: object
++ additionalProperties: true
++ description: |
++ An object with key-value pairs representing labels. K8s Nodes are
++ required to match all these labels for this Pod to be scheduled on
++ them.
++
++ ```yaml
++ disktype: ssd
++ nodetype: awesome
++ ```
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector)
++ for more details.
++ tolerations: &tolerations-spec
++ type: array
++ description: |
++ Tolerations allow a pod to be scheduled on nodes with taints. These
++ tolerations are in addition to the tolerations common to
++ all pods of their respective kind
++ ([scheduling.corePods.tolerations](schema_scheduling.corePods.tolerations),
++ [scheduling.userPods.tolerations](schema_scheduling.userPods.tolerations)).
++
++ Pass this field an array of
++ [`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#toleration-v1-core)
++ objects.
++
++ See the [Kubernetes
++ docs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)
++ for more info.
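++
++ For example, a sketch of one extra toleration (the key and value are
++ placeholders):
++
++ ```yaml
++ hub:
++   tolerations:
++     - key: my-taint-key
++       operator: Equal
++       value: my-taint-value
++       effect: NoSchedule
++ ```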
++ activeServerLimit:
++ type: [integer, "null"]
++ description: &jupyterhub-native-config-description |
++ JupyterHub native configuration, see the [JupyterHub
++ documentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html)
++ for more information.
++ allowNamedServers:
++ type: [boolean, "null"]
++ description: *jupyterhub-native-config-description
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ K8s annotations for the hub pod.
++ authenticatePrometheus:
++ type: [boolean, "null"]
++ description: *jupyterhub-native-config-description
++ concurrentSpawnLimit:
++ type: [integer, "null"]
++ description: *jupyterhub-native-config-description
++ consecutiveFailureLimit:
++ type: [integer, "null"]
++ description: *jupyterhub-native-config-description
++ podSecurityContext: &podSecurityContext-spec
++ additionalProperties: true
++ description: |
++ A k8s native specification of the pod's security context, see [the
++ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podsecuritycontext-v1-core)
++ for details.
++ containerSecurityContext: &containerSecurityContext-spec
++ type: object
++ additionalProperties: true
++ description: |
++ A k8s native specification of the container's security context, see [the
++ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core)
++ for details.
++ deploymentStrategy:
++ type: object
++ additionalProperties: false
++ properties:
++ rollingUpdate:
++ type: [string, "null"]
++ type:
++ type: [string, "null"]
++ description: |
++ JupyterHub does not support running in parallel; due to this we
++ default to using a deployment strategy of Recreate.
++ extraContainers: &extraContainers-spec
++ type: array
++ description: |
++ Additional containers for the Pod. Use a k8s native syntax.
++ extraVolumeMounts: &extraVolumeMounts-spec
++ type: array
++ description: |
++ Additional volume mounts for the Container. Use a k8s native syntax.
++ extraVolumes: &extraVolumes-spec
++ type: array
++ description: |
++ Additional volumes for the Pod. Use a k8s native syntax.
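++
++ A sketch pairing an extra volume with a matching volume mount (the
++ names and path are placeholders):
++
++ ```yaml
++ hub:
++   extraVolumes:
++     - name: my-extra-config
++       configMap:
++         name: my-configmap
++   extraVolumeMounts:
++     - name: my-extra-config
++       mountPath: /etc/my-extra-config
++ ```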
++ livenessProbe: &probe-spec
++ type: object
++ additionalProperties: true
++ required: [enabled]
++ if:
++ properties:
++ enabled:
++ const: true
++ then:
++ description: |
++ This config option is like the k8s native specification of a
++ container probe, except that it also supports an `enabled` boolean
++ flag.
++
++ See [the k8s
++ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#probe-v1-core)
++ for more details.
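++
++ For example, a sketch tuning the probe (the numbers are illustrative):
++
++ ```yaml
++ hub:
++   livenessProbe:
++     enabled: true
++     initialDelaySeconds: 300
++     periodSeconds: 10
++ ```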
++ readinessProbe: *probe-spec
++ namedServerLimitPerUser:
++ type: [integer, "null"]
++ description: *jupyterhub-native-config-description
++ redirectToServer:
++ type: [boolean, "null"]
++ description: *jupyterhub-native-config-description
++ resources: &resources-spec
++ type: object
++ additionalProperties: true
++ description: |
++ A k8s native specification of resources, see [the
++ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core).
++ lifecycle: &lifecycle-spec
++ type: object
++ additionalProperties: false
++ description: |
++ A k8s native specification of lifecycle hooks on the container, see [the
++ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#lifecycle-v1-core).
++ properties:
++ postStart:
++ type: object
++ additionalProperties: true
++ preStop:
++ type: object
++ additionalProperties: true
++ services:
++ type: object
++ additionalProperties: true
++ description: |
++ This is where you register JupyterHub services. For details on how to
++ configure these services in this Helm chart, just keep reading; for
++ details on services themselves, instead read [JupyterHub's
++ documentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/service.html).
++
++ ```{note}
++ Only a selection of JupyterHub's configuration options that can be
++ configured for a service are documented below. All configuration set
++ here will be applied even if this Helm chart doesn't recognize it.
++ ```
++
++ While JupyterHub's native configuration accepts a list of service objects,
++ this Helm chart only accepts a dictionary where each key represents the
++ name of a service and the value is the actual service object.
++
++ When configuring JupyterHub services via this Helm chart, the `name`
++ field can be omitted as it can be implied by the dictionary key.
++ Further, the `api_token` field can be omitted as it will be
++ automatically generated as of version 1.1.0 of this Helm chart.
++
++ If you have an external service that needs to access the automatically
++ generated api_token for the service, you can access it from the `hub`
++ k8s Secret that is part of this Helm chart, under the key
++ `hub.services.my-service-config-key.apiToken`.
++
++ Here is an example configuration of two services where the first
++ explicitly sets a name and api_token, while the second omits those and
++ lets the name be implied from the key name and the api_token be
++ automatically generated.
++
++ ```yaml
++ hub:
++ services:
++ my-service-1:
++ admin: true
++ name: my-explicitly-set-service-name
++ api_token: my-explicitly-set-api_token
++
++ # the name of the following service will be my-service-2
++ # the api_token of the following service will be generated
++ my-service-2: {}
++ ```
++
++ If you develop a Helm chart depending on the JupyterHub Helm chart and
++ want to let some Pod's environment variable be populated with the
++ api_token of a service registered like above, then do something along
++ these lines.
++
++ ```yaml
++ # ... container specification of a pod ...
++ env:
++ - name: MY_SERVICE_1_API_TOKEN
++ valueFrom:
++ secretKeyRef:
++ # Don't hardcode the name, use the globally accessible
++ # named templates part of the JupyterHub Helm chart.
++ name: {{ include "jupyterhub.hub.fullname" . }}
++ # Note below the use of the configuration key my-service-1
++ # rather than the explicitly set service name.
++ key: hub.services.my-service-1.apiToken
++ ```
++ properties:
++ name:
++ type: string
++ description: |
++ The name can be implied via the key name under which this
++ service is configured, and may therefore be omitted in this Helm
++ chart's configuration of JupyterHub.
++ admin:
++ type: boolean
++ command:
++ type: [string, array]
++ url:
++ type: string
++ api_token:
++ type: [string, "null"]
++ description: |
++ The api_token will be automatically generated if not
++ explicitly set. It will also be exposed via a k8s Secret that is
++ part of this Helm chart, under a specific key.
++
++ See the documentation under
++ [`hub.services`](schema_hub.services) for details about this.
++ apiToken:
++ type: [string, "null"]
++ description: |
++ An alias for api_token provided for backward compatibility by
++ the JupyterHub Helm chart that will be transformed to
++ api_token.
++ loadRoles:
++ type: object
++ additionalProperties: true
++ description: |
++ This is where you should define JupyterHub roles and apply them to
++ JupyterHub users, groups, and services to grant them additional
++ permissions as defined in JupyterHub's RBAC system.
++
++ Complement this documentation with [JupyterHub's
++ documentation](https://jupyterhub.readthedocs.io/en/stable/rbac/roles.html#defining-roles)
++ about `load_roles`.
++
++ Note that while JupyterHub's native configuration `load_roles` accepts
++ a list of role objects, this Helm chart only accepts a dictionary where
++ each key represents the name of a role and the value is the actual
++ role object.
++
++ ```yaml
++ hub:
++ loadRoles:
++ teacher:
++ description: Access to users' information and group membership
++
++ # this role provides permissions to...
++ scopes: [users, groups]
++
++ # this role will be assigned to...
++ users: [erik]
++ services: [grading-service]
++ groups: [teachers]
++ ```
++
++ When configuring JupyterHub roles via this Helm chart, the `name`
++ field can be omitted as it can be implied by the dictionary key.
++ shutdownOnLogout:
++ type: [boolean, "null"]
++ description: *jupyterhub-native-config-description
++ templatePaths:
++ type: array
++ description: *jupyterhub-native-config-description
++ templateVars:
++ type: object
++ additionalProperties: true
++ description: *jupyterhub-native-config-description
++ serviceAccount: &serviceAccount
++ type: object
++ required: [create]
++ additionalProperties: false
++ description: |
++ Configuration for a k8s ServiceAccount dedicated for use by the
++ specific pod which this configuration is nested under.
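++
++ A minimal sketch (the name and annotation are placeholders):
++
++ ```yaml
++ hub:
++   serviceAccount:
++     create: true
++     name: my-hub-serviceaccount
++     annotations:
++       example.com/owner: team-a
++ ```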
++ properties:
++ create:
++ type: boolean
++ description: |
++ Whether or not to create the `ServiceAccount` resource.
++ name:
++ type: ["string", "null"]
++ description: |
++ This configuration serves multiple purposes:
++
++ - It will be the `serviceAccountName` referenced by related Pods.
++ - If `create` is set, the created ServiceAccount resource will be named like this.
++ - If [`rbac.create`](schema_rbac.create) is set, the associated (Cluster)RoleBindings will bind to this name.
++
++ If not explicitly provided, a default name will be used.
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Kubernetes annotations to apply to the k8s ServiceAccount.
++ extraPodSpec: &extraPodSpec-spec
++ type: object
++ additionalProperties: true
++ description: |
++ Arbitrary extra k8s pod specification as a YAML object. The default
++ value of this setting is an empty object, i.e. no extra configuration.
++ The value of this property is augmented to the pod specification as-is.
++
++ This is a powerful tool for expert k8s administrators with advanced
++ configuration requirements. This setting should only be used for
++ configuration that cannot be accomplished through the other settings.
++ Misusing this setting can break your deployment and/or compromise
++ your system security.
++
++ This is one of four related settings for inserting arbitrary pod
++ specification:
++
++ 1. hub.extraPodSpec
++ 2. proxy.chp.extraPodSpec
++ 3. proxy.traefik.extraPodSpec
++ 4. scheduling.userScheduler.extraPodSpec
++
++ One real-world use of these settings is to enable host networking. For
++ example, to configure host networking for the hub pod, add the
++ following to your helm configuration values:
++
++ ```yaml
++ hub:
++ extraPodSpec:
++ hostNetwork: true
++ dnsPolicy: ClusterFirstWithHostNet
++ ```
++
++ Likewise, to configure host networking for the proxy pod, add the
++ following:
++
++ ```yaml
++ proxy:
++ chp:
++ extraPodSpec:
++ hostNetwork: true
++ dnsPolicy: ClusterFirstWithHostNet
++ ```
++
++ N.B. Host networking has special security implications and can easily
++ break your deployment. This is an example—not an endorsement.
++
++ See [PodSpec](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec)
++ for the latest pod resource specification.
++
++ proxy:
++ type: object
++ additionalProperties: false
++ properties:
++ chp:
++ type: object
++ additionalProperties: false
++ description: |
++ Configure the configurable-http-proxy (chp) pod managed by jupyterhub to route traffic
++ both to itself and to user pods.
++ properties:
++ revisionHistoryLimit: *revisionHistoryLimit
++ networkPolicy: *networkPolicy-spec
++ extraCommandLineFlags:
++ type: array
++ description: |
++ A list of strings to be added as command line options when
++ starting
++ [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy#command-line-options)
++ that will be expanded with Helm's template function `tpl` which
++ can render Helm template logic inside curly braces (`{{ ... }}`).
++
++ ```yaml
++ proxy:
++ chp:
++ extraCommandLineFlags:
++ - "--auto-rewrite"
++ - "--custom-header {{ .Values.myCustomStuff }}"
++ ```
++
++ Note that these will be appended last, and if you provide the same
++ flag twice, the last flag will be used, which means you can
++ override the default flag values as well.
++ extraEnv:
++ type: [object, array]
++ additionalProperties: true
++ description: |
++ Extra environment variables that should be set for the chp pod.
++
++ Environment variables are usually used here to:
++ - override HUB_SERVICE_PORT or HUB_SERVICE_HOST default values
++ - set CONFIGPROXY_SSL_KEY_PASSPHRASE for setting passphrase of SSL keys
++
++ String literals with `$(ENV_VAR_NAME)` will be expanded by the kubelet,
++ which is part of Kubernetes.
++
++ ```yaml
++ proxy:
++ chp:
++ extraEnv:
++ # basic notation (for literal values only)
++ MY_ENV_VARS_NAME1: "my env var value 1"
++
++ # explicit notation (the "name" field takes precedence)
++ CHP_NAMESPACE:
++ name: CHP_NAMESPACE
++ valueFrom:
++ fieldRef:
++ fieldPath: metadata.namespace
++
++ # implicit notation (the "name" field is implied)
++ PREFIXED_CHP_NAMESPACE:
++ value: "my-prefix-$(CHP_NAMESPACE)"
++ SECRET_VALUE:
++ valueFrom:
++ secretKeyRef:
++ name: my-k8s-secret
++ key: password
++ ```
++
++ For more information, see the [Kubernetes EnvVar
++ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
++ pdb: *pdb-spec
++ nodeSelector: *nodeSelector-spec
++ tolerations: *tolerations-spec
++ containerSecurityContext: *containerSecurityContext-spec
++ image: *image-spec
++ livenessProbe: *probe-spec
++ readinessProbe: *probe-spec
++ resources: *resources-spec
++ defaultTarget:
++ type: [string, "null"]
++ description: |
++ Override the URL for the default routing target for the proxy.
++ Defaults to JupyterHub itself.
++ This will generally only have an effect while JupyterHub is not running,
++ as JupyterHub adds itself as the default target after it starts.
++ errorTarget:
++ type: [string, "null"]
++ description: |
++ Override the URL for the error target for the proxy.
++ Defaults to JupyterHub itself.
++ Useful to reduce load on the Hub
++ or produce more informative error messages than the Hub's default,
++ e.g. in highly customized deployments such as BinderHub.
++ See Configurable HTTP Proxy for details on implementing an error target.
++ extraPodSpec: *extraPodSpec-spec
++ secretToken:
++ type: [string, "null"]
++ description: |
++ ```{note}
++ As of version 1.0.0 this will automatically be generated and there is
++ no need to set it manually.
++
++ If you wish to reset a generated key, you can use `kubectl edit` on
++ the k8s Secret typically named `hub` and remove the
++ `hub.config.ConfigurableHTTPProxy.auth_token` entry in the k8s Secret,
++ then perform a new `helm upgrade`.
++ ```
++
++ A 32-byte cryptographically secure randomly generated string used to
++ secure communications between the hub pod and the proxy pod running a
++ [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy)
++ instance.
++
++ ```sh
++ # to generate a value, run
++ openssl rand -hex 32
++ ```
++
++ Changing this value will cause the proxy and hub pods to restart. It is good security
++ practice to rotate these values over time. If this secret leaks, *immediately* change
++ it to something else, or user data can be compromised.
++ service:
++ type: object
++ additionalProperties: false
++ description: |
++ Configuration of the k8s Service `proxy-public` which either will
++ point to the `autohttps` pod running Traefik for TLS termination, or
++ the `proxy` pod running ConfigurableHTTPProxy. Incoming traffic from
++ users on the internet should always go through this k8s Service.
++
++ When this service targets the `autohttps` pod which then routes to the
++ `proxy` pod, a k8s Service named `proxy-http` will be added targeting
++ the `proxy` pod and only accepting HTTP traffic on port 80.
++ properties:
++ type:
++ enum: [ClusterIP, NodePort, LoadBalancer, ExternalName]
++ description: |
++ Default `LoadBalancer`.
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)
++ to learn more about service types.
++ labels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Extra labels to add to the proxy service.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++ to learn more about labels.
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Annotations to apply to the service that is exposing the proxy.
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
++ for more details about annotations.
++ nodePorts:
++ type: object
++ additionalProperties: false
++ description: |
++ Object to set NodePorts to expose the service on for http and https.
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport)
++ for more details about NodePorts.
++ properties:
++ http:
++ type: [integer, "null"]
++ description: |
++ The HTTP port the proxy-public service should be exposed on.
++ https:
++ type: [integer, "null"]
++ description: |
++ The HTTPS port the proxy-public service should be exposed on.
++ disableHttpPort:
++ type: boolean
++ description: |
++ Default `false`.
++
++ If `true`, port 80 for incoming HTTP traffic will no longer be exposed. This should not be used with `proxy.https.type=letsencrypt` or `proxy.https.enabled=false` as it would remove the only exposed port.
++ extraPorts:
++ type: array
++ description: |
++ Extra ports the k8s Service should accept incoming traffic on,
++ which will be redirected to either the `autohttps` pod (traefik)
++ or the `proxy` pod (chp).
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#serviceport-v1-core)
++ for the structure of the items in this list.
++ loadBalancerIP:
++ type: [string, "null"]
++ description: |
++ The public IP address the proxy-public Kubernetes service should
++ be exposed on. This entry will end up at the configurable proxy
++ server that JupyterHub manages, which will direct traffic to user
++ pods at the `/user` path and the hub pod at the `/hub` path.
++
++ Set this if you want to use a fixed external IP address instead of
++ a dynamically acquired one. This is relevant if you have a domain
++ name that you want to point to a specific IP and want to ensure it
++ doesn't change.
++ loadBalancerSourceRanges:
++ type: array
++ description: |
++ A list of IP CIDR ranges that are allowed to access the load balancer service.
++ Defaults to allowing everyone to access it.
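++
++ For example, a sketch restricting access to a private network (the CIDR
++ is a placeholder):
++
++ ```yaml
++ proxy:
++   service:
++     loadBalancerSourceRanges:
++       - 10.0.0.0/8
++ ```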
++ https:
++ type: object
++ additionalProperties: false
++ description: |
++ Object for customizing the settings for HTTPS used by JupyterHub's proxy.
++ For more information on configuring HTTPS for your JupyterHub, see the [HTTPS section in our security guide](https)
++ properties:
++ enabled:
++ type: [boolean, "null"]
++ description: |
++ Indicator to set whether HTTPS should be enabled or not on the proxy. Defaults to `true` if the https object is provided.
++ type:
++ enum: [null, "", letsencrypt, manual, offload, secret]
++ description: |
++ The type of HTTPS encryption that is used.
++ Decides on which ports and network policies are used for communication via HTTPS. Setting this to `secret` sets the type to manual HTTPS with a secret that has to be provided in the `https.secret` object.
++ Defaults to `letsencrypt`.
++ letsencrypt:
++ type: object
++ additionalProperties: false
++ properties:
++ contactEmail:
++ type: [string, "null"]
++ description: |
++ The contact email to be used for automatically provisioned HTTPS certificates by Let's Encrypt. For more information see [Set up automatic HTTPS](setup-automatic-https).
++ Required for automatic HTTPS.
++ acmeServer:
++ type: [string, "null"]
++ description: |
++ Let's Encrypt is one of various ACME servers that can provide
++ a certificate, and by default their production server is used.
++
++ Let's Encrypt staging: https://acme-staging-v02.api.letsencrypt.org/directory
++ Let's Encrypt production: https://acme-v02.api.letsencrypt.org/directory
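++
++ A minimal sketch of a Let's Encrypt setup (the email and domain are
++ placeholders):
++
++ ```yaml
++ proxy:
++   https:
++     enabled: true
++     type: letsencrypt
++     letsencrypt:
++       contactEmail: admin@example.com
++     hosts:
++       - jupyterhub.example.com
++ ```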
++ manual:
++ type: object
++ additionalProperties: false
++ description: |
++ Object for providing your own certificates for manual HTTPS configuration. To be provided when setting `https.type` to `manual`.
++ See [Set up manual HTTPS](setup-manual-https)
++ properties:
++ key:
++ type: [string, "null"]
++ description: |
++ The RSA private key to be used for HTTPS.
++ To be provided in the form of
++
++ ```
++ key: |
++ -----BEGIN RSA PRIVATE KEY-----
++ ...
++ -----END RSA PRIVATE KEY-----
++ ```
++ cert:
++ type: [string, "null"]
++ description: |
++ The certificate to be used for HTTPS.
++ To be provided in the form of
++
++ ```
++ cert: |
++ -----BEGIN CERTIFICATE-----
++ ...
++ -----END CERTIFICATE-----
++ ```
++ secret:
++ type: object
++ additionalProperties: false
++ description: |
++ Secret to be provided when setting `https.type` to `secret`.
++ properties:
++ name:
++ type: [string, "null"]
++ description: |
++ Name of the secret
++ key:
++ type: [string, "null"]
++ description: |
++ Path to the private key to be used for HTTPS.
++ Example: `'tls.key'`
++ crt:
++ type: [string, "null"]
++ description: |
++ Path to the certificate to be used for HTTPS.
++ Example: `'tls.crt'`
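++
++ A sketch referencing an existing TLS Secret (the Secret name is a
++ placeholder; `tls.key` and `tls.crt` are the conventional keys of a k8s
++ TLS Secret):
++
++ ```yaml
++ proxy:
++   https:
++     enabled: true
++     type: secret
++     secret:
++       name: my-tls-secret
++       key: tls.key
++       crt: tls.crt
++ ```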
++ hosts:
++ type: array
++ description: |
++ Your domain in list form.
++ Required for automatic HTTPS. See [Set up automatic HTTPS](setup-automatic-https).
++ To be provided like:
++ ```
++ hosts:
++ - <your-domain-name>
++ ```
++ traefik:
++ type: object
++ additionalProperties: false
++ description: |
++ Configure the traefik proxy used to terminate TLS when 'autohttps' is enabled
++ properties:
++ revisionHistoryLimit: *revisionHistoryLimit
++ labels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Extra labels to add to the traefik pod.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++ to learn more about labels.
++ networkPolicy: *networkPolicy-spec
++ extraInitContainers:
++ type: array
++ description: |
++ list of extraInitContainers to be run with traefik pod, after the containers set in the chart. See [Kubernetes Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
++
++ ```yaml
++ proxy:
++ traefik:
++ extraInitContainers:
++ - name: init-myservice
++ image: busybox:1.28
++ command: ['sh', '-c', 'command1']
++ - name: init-mydb
++ image: busybox:1.28
++ command: ['sh', '-c', 'command2']
++ ```
++ extraEnv:
++ type: [object, array]
++ additionalProperties: true
++ description: |
++ Extra environment variables that should be set for the traefik pod.
++
++ Environment variables here may be used to configure traefik.
++
++ String literals with `$(ENV_VAR_NAME)` will be expanded by the kubelet,
++ which is part of Kubernetes.
++
++ ```yaml
++ proxy:
++ traefik:
++ extraEnv:
++ # basic notation (for literal values only)
++ MY_ENV_VARS_NAME1: "my env var value 1"
++
++ # explicit notation (the "name" field takes precedence)
++ TRAEFIK_NAMESPACE:
++ name: TRAEFIK_NAMESPACE
++ valueFrom:
++ fieldRef:
++ fieldPath: metadata.namespace
++
++ # implicit notation (the "name" field is implied)
++ PREFIXED_TRAEFIK_NAMESPACE:
++ value: "my-prefix-$(TRAEFIK_NAMESPACE)"
++ SECRET_VALUE:
++ valueFrom:
++ secretKeyRef:
++ name: my-k8s-secret
++ key: password
++ ```
++
++ For more information, see the [Kubernetes EnvVar
++ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
++ pdb: *pdb-spec
++ nodeSelector: *nodeSelector-spec
++ tolerations: *tolerations-spec
++ containerSecurityContext: *containerSecurityContext-spec
++ extraDynamicConfig:
++ type: object
++ additionalProperties: true
++ description: |
++ This refers to traefik's post-startup configuration.
++
++ This Helm chart already provides such configuration, so this is a
++ place where you can merge in additional configuration. If you are
++ about to use this configuration, you may want to inspect the
++ default configuration declared
++ [here](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/HEAD/jupyterhub/templates/proxy/autohttps/_configmap-dynamic.yaml).
++ extraPorts:
++ type: array
++ description: |
++ Extra ports for the traefik container within the autohttps pod
++ that you would like to expose, formatted in a k8s native way.
++ extraStaticConfig:
++ type: object
++ additionalProperties: true
++ description: |
++ This refers to traefik's startup configuration.
++
++ This Helm chart already provides such configuration, so this is a
++ place where you can merge in additional configuration. If you are
++ about to use this configuration, you may want to inspect the
++ default configuration declared
++ [here](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/HEAD/jupyterhub/templates/proxy/autohttps/_configmap-traefik.yaml).
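++
++ For example, a sketch merging in a log level setting (assuming traefik's
++ static configuration schema):
++
++ ```yaml
++ proxy:
++   traefik:
++     extraStaticConfig:
++       log:
++         level: DEBUG
++ ```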
++ extraVolumes: *extraVolumes-spec
++ extraVolumeMounts: *extraVolumeMounts-spec
++ hsts:
++ type: object
++ additionalProperties: false
++ required: [includeSubdomains, maxAge, preload]
++ description: |
++ This section regards an HTTP Strict-Transport-Security (HSTS)
++ response header. It can act as a request for visiting web browsers
++ to enforce HTTPS on their end for a given time into the future, and
++ optionally also for future requests to subdomains.
++
++ These settings relate to traefik configuration which we use as a
++ TLS termination proxy.
++
++ See [Mozilla's
++ documentation](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security)
++ for more information.
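++
++ A sketch of a conservative HSTS setup (the max age of about six months
++ is illustrative):
++
++ ```yaml
++ proxy:
++   traefik:
++     hsts:
++       includeSubdomains: false
++       maxAge: 15724800
++       preload: false
++ ```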
++ properties:
++ includeSubdomains:
++ type: boolean
++ maxAge:
++ type: integer
++ preload:
++ type: boolean
++ image: *image-spec
++ resources: *resources-spec
++ serviceAccount: *serviceAccount
++ extraPodSpec: *extraPodSpec-spec
++ labels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ K8s labels for the proxy pod.
++
++ ```{note}
++ For consistency, this should really be located under
++ proxy.chp.labels but isn't for historical reasons.
++ ```
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ K8s annotations for the proxy pod.
++
++ ```{note}
++ For consistency, this should really be located under
++ proxy.chp.annotations but isn't for historical reasons.
++ ```
++ deploymentStrategy:
++ type: object
++ additionalProperties: false
++ properties:
++ rollingUpdate:
++ type: [string, "null"]
++ type:
++ type: [string, "null"]
++ description: |
++ While the proxy pod running
++ [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy)
++ could run in parallel, two instances running in parallel wouldn't
++ both receive updates from JupyterHub regarding how it should route
++ traffic. Due to this we default to using a deployment strategy of
++ Recreate instead of RollingUpdate.
++ secretSync:
++ type: object
++ additionalProperties: false
++ description: |
++ This configuration section refers to configuration of the sidecar
++ container in the autohttps pod running next to its traefik container
++ responsible for TLS termination.
++
++ The purpose of this container is to store away and load TLS
++ certificates from a k8s Secret. The TLS certificates are acquired by
++ the ACME client (LEGO) that is running within the traefik container,
++ where traefik is using them for TLS termination.
++ properties:
++ containerSecurityContext: *containerSecurityContext-spec
++ image: *image-spec
++ resources: *resources-spec
++
++ singleuser:
++ type: object
++ additionalProperties: false
++ description: |
++ Options for customizing the environment that is provided to the users after they log in.
++ properties:
++ networkPolicy: *networkPolicy-spec
++ podNameTemplate:
++ type: [string, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.pod_name_template](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.pod_name_template).
++ cpu:
++ type: object
++ additionalProperties: false
++ description: |
++ Set CPU limits & guarantees that are enforced for each user.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
++ for more info.
++ properties:
++ limit:
++ type: [number, "null"]
++ guarantee:
++ type: [number, "null"]
++ memory:
++ type: object
++ additionalProperties: false
++ description: |
++ Set Memory limits & guarantees that are enforced for each user.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
++ for more info.
++ properties:
++ limit:
++ type: [number, string, "null"]
++ guarantee:
++ type: [number, string, "null"]
++ description: |
++ Note that this field is referred to as *requests* by the Kubernetes API.
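++
++ A combined sketch of CPU and memory limits & guarantees (the numbers
++ are illustrative):
++
++ ```yaml
++ singleuser:
++   cpu:
++     limit: 2
++     guarantee: 0.5
++   memory:
++     limit: 2G
++     guarantee: 1G
++ ```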
++ image: *image-spec
++ initContainers:
++ type: array
++ description: |
++ List of initContainers to be run with every singleuser pod. See the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/).
++
++ ```yaml
++ singleuser:
++ initContainers:
++ - name: init-myservice
++ image: busybox:1.28
++ command: ['sh', '-c', 'command1']
++ - name: init-mydb
++ image: busybox:1.28
++ command: ['sh', '-c', 'command2']
++ ```
++ profileList:
++ type: array
++ description: |
++ For more information about the profile list, see [KubeSpawner's
++ documentation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner)
++ as this is simply a passthrough to that configuration.
++
++ ```{note}
++ The image-pullers are aware of the overrides of images in
++ `singleuser.profileList` but they won't be if you configure it in
++ JupyterHub's configuration of `c.KubeSpawner.profile_list`.
++ ```
++
++ ```yaml
++ singleuser:
++ profileList:
++ - display_name: "Default: Shared, 8 CPU cores"
++ description: "Your code will run on a shared machine with CPU only."
++ default: True
++ - display_name: "Personal, 4 CPU cores & 26GB RAM, 1 NVIDIA Tesla K80 GPU"
++ description: "Your code will run a personal machine with a GPU."
++ kubespawner_override:
++ extra_resource_limits:
++ nvidia.com/gpu: "1"
++ ```
++ extraFiles: *extraFiles
++ extraEnv:
++ type: [object, array]
++ additionalProperties: true
++ description: |
++ Extra environment variables that should be set for the user pods.
++
++ String literals with `$(ENV_VAR_NAME)` will be expanded by the kubelet,
++ which is part of Kubernetes. Note that the user pods will already have
++ access to a set of environment variables that you can use, like
++ `JUPYTERHUB_USER` and `JUPYTERHUB_HOST`. For more information about these,
++ inspect [this source
++ code](https://github.com/jupyterhub/jupyterhub/blob/cc8e7806530466dce8968567d1bbd2b39a7afa26/jupyterhub/spawner.py#L763).
++
++ ```yaml
++ singleuser:
++ extraEnv:
++ # basic notation (for literal values only)
++ MY_ENV_VARS_NAME1: "my env var value 1"
++
++ # explicit notation (the "name" field takes precedence)
++ USER_NAMESPACE:
++ name: USER_NAMESPACE
++ valueFrom:
++ fieldRef:
++ fieldPath: metadata.namespace
++
++ # implicit notation (the "name" field is implied)
++ PREFIXED_USER_NAMESPACE:
++ value: "my-prefix-$(USER_NAMESPACE)"
++ SECRET_VALUE:
++ valueFrom:
++ secretKeyRef:
++ name: my-k8s-secret
++ key: password
++ ```
++
++ For more information, see the [Kubernetes EnvVar
++ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
++ nodeSelector: *nodeSelector-spec
++ extraTolerations: *tolerations-spec
++ extraNodeAffinity:
++ type: object
++ additionalProperties: false
++ description: |
++ Affinities describe where pods prefer or require to be scheduled; they
++ may prefer or require a node where they are to be scheduled to have a
++ certain label (node affinity). They may also require to be scheduled
++ in proximity, or with a lack of proximity, to another pod (pod affinity
++ and pod anti-affinity).
++
++ See the [Kubernetes
++ docs](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)
++ for more info.
++ properties:
++ required:
++ type: array
++ description: |
++ Pass this field an array of
++ [`NodeSelectorTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#nodeselectorterm-v1-core)
++ objects.
++ preferred:
++ type: array
++ description: |
++ Pass this field an array of
++ [`PreferredSchedulingTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#preferredschedulingterm-v1-core)
++ objects.
++ extraPodAffinity:
++ type: object
++ additionalProperties: false
++ description: |
++ See the description of `singleuser.extraNodeAffinity`.
++ properties:
++ required:
++ type: array
++ description: |
++ Pass this field an array of
++ [`PodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podaffinityterm-v1-core)
++ objects.
++ preferred:
++ type: array
++ description: |
++ Pass this field an array of
++ [`WeightedPodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#weightedpodaffinityterm-v1-core)
++ objects.
++ extraPodAntiAffinity:
++ type: object
++ additionalProperties: false
++ description: |
++ See the description of `singleuser.extraNodeAffinity`.
++ properties:
++ required:
++ type: array
++ description: |
++ Pass this field an array of
++ [`PodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podaffinityterm-v1-core)
++ objects.
++ preferred:
++ type: array
++ description: |
++ Pass this field an array of
++ [`WeightedPodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#weightedpodaffinityterm-v1-core)
++ objects.
++ cloudMetadata:
++ type: object
++ additionalProperties: false
++ required: [blockWithIptables, ip]
++ description: |
++ Please refer to the dedicated section in [the Helm chart
++ documentation](block-metadata-iptables) for more information about
++ this.
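++
++ A sketch blocking the conventional cloud metadata server IP:
++
++ ```yaml
++ singleuser:
++   cloudMetadata:
++     blockWithIptables: true
++     ip: 169.254.169.254
++ ```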
++ properties:
++ blockWithIptables:
++ type: boolean
++ ip:
++ type: string
++
++ cmd:
++ type: [array, string, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.cmd](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.cmd).
++ The default is "jupyterhub-singleuser".
++ Use `cmd: null` to launch a custom CMD from the image,
++ which must launch jupyterhub-singleuser or an equivalent process eventually.
++ For example: Jupyter's docker-stacks images.
++ defaultUrl:
++ type: [string, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.default_url](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.default_url).
++ # FIXME: name mismatch, named events_enabled in kubespawner
++ events:
++ type: [boolean, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.events_enabled](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.events_enabled).
++ extraAnnotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.extra_annotations](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_annotations).
++ extraContainers:
++ type: array
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.extra_containers](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_containers).
++ extraLabels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.extra_labels](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_labels).
++ extraPodConfig:
++ type: object
++ additionalProperties: true
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.extra_pod_config](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_pod_config).
++ extraResource:
++ type: object
++ additionalProperties: false
++ properties:
++ # FIXME: name mismatch, named extra_resource_guarantees in kubespawner
++ guarantees:
++ type: object
++ additionalProperties: true
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.extra_resource_guarantees](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_resource_guarantees).
++ # FIXME: name mismatch, named extra_resource_limits in kubespawner
++ limits:
++ type: object
++ additionalProperties: true
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.extra_resource_limits](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_resource_limits).
++ fsGid:
++ type: [integer, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.fs_gid](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.fs_gid).
++ lifecycleHooks:
++ type: object
++ additionalProperties: false
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.lifecycle_hooks](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.lifecycle_hooks).
++ properties:
++ postStart:
++ type: object
++ additionalProperties: true
++ preStop:
++ type: object
++ additionalProperties: true
++ networkTools:
++ type: object
++ additionalProperties: false
++ description: |
++ This configuration section refers to configuration of a conditionally
++ created initContainer for the user pods with the purpose of blocking
++ a specific IP address.
++
++ This initContainer will be created if
++ [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)
++ is set to true.
++ properties:
++ image: *image-spec
++ resources: *resources-spec
++ # FIXME: name mismatch, named service_account in kubespawner
++ serviceAccountName:
++ type: [string, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.service_account](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.service_account).
++ startTimeout:
++ type: [integer, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.start_timeout](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.start_timeout).
++ storage:
++ type: object
++ additionalProperties: false
++ required: [type, homeMountPath]
++ description: |
++ This section configures KubeSpawner directly to some extent but also
++ indirectly through Helm chart specific configuration options such as
++ [`singleuser.storage.type`](schema_singleuser.storage.type).
++ properties:
++ capacity:
++ type: [string, "null"]
++ description: |
++ Configures `KubeSpawner.storage_capacity`.
++
++ See the [KubeSpawner
++ documentation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html)
++ for more information.
++ dynamic:
++ type: object
++ additionalProperties: false
++ properties:
++ pvcNameTemplate:
++ type: [string, "null"]
++ description: |
++ Configures `KubeSpawner.pvc_name_template` which will be the
++ resource name of the PVC created by KubeSpawner for each user
++ if needed.
++ storageAccessModes:
++ type: array
++ items:
++ type: [string, "null"]
++ description: |
++ Configures `KubeSpawner.storage_access_modes`.
++
++ See KubeSpawner's documentation and [the k8s
++ documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes)
++ for more information.
++ storageClass:
++ type: [string, "null"]
++ description: |
++ Configures `KubeSpawner.storage_class`, which can be an
++ explicit StorageClass to dynamically provision storage for the
++ PVC that KubeSpawner will create.
++
++ There is often a default StorageClass available in k8s clusters
++ for use if this is unspecified.
++ volumeNameTemplate:
++ type: [string, "null"]
++ description: |
++ Configures `KubeSpawner.volume_name_template`, which is the
++ name to reference from the containers volumeMounts section.
++ extraLabels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Configures `KubeSpawner.storage_extra_labels`. Note that these
++ labels are set on the PVC during creation only and won't be
++ updated after creation.
++ extraVolumeMounts: *extraVolumeMounts-spec
++ extraVolumes: *extraVolumes-spec
++ homeMountPath:
++ type: string
++ description: |
++ The location within the container where the home folder storage
++ should be mounted.
++ static:
++ type: object
++ additionalProperties: false
++ properties:
++ pvcName:
++ type: [string, "null"]
++ description: |
++ Configures `KubeSpawner.pvc_claim_name` to reference
++ pre-existing storage.
++ subPath:
++ type: [string, "null"]
++ description: |
++ Configures the `subPath` field of a
++ `KubeSpawner.volume_mounts` entry added by the Helm chart.
++
++ Path within the volume from which the container's volume
++ should be mounted.
++ type:
++ enum: [dynamic, static, none]
++ description: |
++ Decide if you want storage to be provisioned dynamically
++ (dynamic), or if you want to attach existing storage (static), or
++ don't want any storage to be attached (none).
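++
++ For example, a sketch attaching pre-existing storage (the PVC name is a
++ placeholder; `{username}` is a KubeSpawner template expansion):
++
++ ```yaml
++ singleuser:
++   storage:
++     type: static
++     static:
++       pvcName: my-existing-pvc
++       subPath: "{username}"
++ ```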
++ allowPrivilegeEscalation:
++ type: [boolean, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.allow_privilege_escalation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.allow_privilege_escalation).
++ uid:
++ type: [integer, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.uid](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.uid).
++
++ This dictates which user the main container will start up as.
++
++ As an example of when this is needed, consider if you want to enable
++ sudo rights for some of your users. This can be done by starting up as
++ root, enabling it from the container in a startup script, and then
++ transitioning to the normal user.
++
++ Default is 1000, set to null to use the container's default.
++
++ scheduling:
++ type: object
++ additionalProperties: false
++ description: |
++ Objects for customizing the scheduling of various pods on the nodes and
++ related labels.
++ properties:
++ userScheduler:
++ type: object
++ additionalProperties: false
++ required: [enabled, plugins, pluginConfig, logLevel]
++ description: |
++ The user scheduler makes sure that user pods are scheduled
++ tightly on nodes; this is useful for autoscaling of user node pools.
++ properties:
++ enabled:
++ type: boolean
++ description: |
++ Enables the user scheduler.
++ revisionHistoryLimit: *revisionHistoryLimit
++ replicas:
++ type: integer
++ description: |
++ You can have multiple schedulers to share the workload or improve
++ availability on node failure.
++ image: *image-spec
++ pdb: *pdb-spec
++ nodeSelector: *nodeSelector-spec
++ tolerations: *tolerations-spec
++ labels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Extra labels to add to the userScheduler pods.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++ to learn more about labels.
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Extra annotations to add to the user-scheduler pods.
++ containerSecurityContext: *containerSecurityContext-spec
++ logLevel:
++ type: integer
++ description: |
++ Corresponds to the verbosity level of logging made by the
++ kube-scheduler binary running within the user-scheduler pod.
++ plugins:
++ type: object
++ additionalProperties: true
++ description: |
++ These plugins refer to kube-scheduler plugins as documented
++ [here](https://kubernetes.io/docs/reference/scheduling/config/).
++
++ The user-scheduler is really just a kube-scheduler configured in a
++ way to pack users tightly on nodes using these plugins. See
++ values.yaml for information about the default plugins.
++ pluginConfig:
++ type: array
++ description: |
++ Individually activated plugins can be configured further.
++ resources: *resources-spec
++ serviceAccount: *serviceAccount
++ extraPodSpec: *extraPodSpec-spec
++ podPriority:
++ type: object
++ additionalProperties: false
++ description: |
++ Pod Priority is used to allow real users to evict user placeholder
++ pods, which in turn, by entering a Pending state, can trigger a scale-up
++ by a cluster autoscaler.
++
++ Having this option enabled only makes sense if the following conditions
++ are met:
++
++ 1. A cluster autoscaler is installed.
++ 2. user-placeholder pods are configured to have a priority equal to or
++ higher than the cluster autoscaler's "priority cutoff" so that the
++ cluster autoscaler scales up a node in advance for a pending user
++ placeholder pod.
++ 3. Normal user pods have a higher priority than the user-placeholder
++ pods.
++ 4. Image puller pods have a priority between normal user pods and
++ user-placeholder pods.
++
++ Note that if the default priority cutoff is not configured on the cluster
++ autoscaler, it will currently default to 0, and that in the future
++ this is meant to be lowered. If your cloud provider is installing the
++ cluster autoscaler for you, they may also configure this specifically.
++
++ Recommended settings for a cluster autoscaler...
++
++ ... with a priority cutoff of -10 (GKE):
++
++ ```yaml
++ podPriority:
++ enabled: true
++ globalDefault: false
++ defaultPriority: 0
++ imagePullerPriority: -5
++ userPlaceholderPriority: -10
++ ```
++
++ ... with a priority cutoff of 0:
++
++ ```yaml
++ podPriority:
++ enabled: true
++ globalDefault: true
++ defaultPriority: 10
++ imagePullerPriority: 5
++ userPlaceholderPriority: 0
++ ```
++ properties:
++ enabled:
++ type: boolean
++ globalDefault:
++ type: boolean
++ description: |
++ Warning! This will influence all pods in the cluster.
++
++ The priority a pod usually gets is 0. But this can be overridden
++ with a PriorityClass resource if it is declared to be the global
++ default. This configuration option allows for the creation of such
++ a global default.
++ defaultPriority:
++ type: integer
++ description: |
++ The actual value for the default pod priority.
++ imagePullerPriority:
++ type: integer
++ description: |
++ The actual value for the [hook|continuous]-image-puller pods' priority.
++ userPlaceholderPriority:
++ type: integer
++ description: |
++ The actual value for the user-placeholder pods' priority.
++ userPlaceholder:
++ type: object
++ additionalProperties: false
++ description: |
++ User placeholders simulate users but will, thanks to PodPriority, be
++ evicted by the cluster autoscaler if a real user shows up. In this way
++ placeholders allow you to create headroom for real users and reduce
++ the risk of a user having to wait for a node to be added. Be sure to
++ use the continuous image puller as well along with placeholders, so
++ the images are also available when real users arrive.
++
++ To test your setup efficiently, you can adjust the number of user
++ placeholders with the following command:
++ ```sh
++ # Configure to have 3 user placeholders
++ kubectl scale sts/user-placeholder --replicas=3
++ ```
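++
++ To instead configure the replica count persistently via this chart, a
++ minimal sketch:
++
++ ```yaml
++ scheduling:
++   userPlaceholder:
++     enabled: true
++     replicas: 3
++ ```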
++ properties:
++ enabled:
++ type: boolean
++ image: *image-spec
++ revisionHistoryLimit: *revisionHistoryLimit
++ replicas:
++ type: integer
++ description: |
++ How many placeholder pods would you like to have?
++ labels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Extra labels to add to the userPlaceholder pods.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++ to learn more about labels.
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Extra annotations to add to the placeholder pods.
++ resources:
++ type: object
++ additionalProperties: true
++ description: |
++ Unless specified here, the placeholder pods will request the same
++ resources specified for the real singleuser pods.
++ containerSecurityContext: *containerSecurityContext-spec
++ corePods:
++ type: object
++ additionalProperties: false
++ description: |
++ These settings influence all pods considered core pods, namely:
++
++ - hub
++ - proxy
++ - autohttps
++ - hook-image-awaiter
++ - user-scheduler
++
++ By default, the tolerations are:
++
++ - hub.jupyter.org/dedicated=core:NoSchedule
++ - hub.jupyter.org_dedicated=core:NoSchedule
++
++ Note that tolerations set here are combined with the respective
++ component's dedicated tolerations, and that `_` is available in case
++ `/` isn't allowed in the cloud's tolerations.
++ properties:
++ tolerations: *tolerations-spec
++ nodeAffinity:
++ type: object
++ additionalProperties: false
++ description: |
++ Where should pods be scheduled? Perhaps nodes with a certain
++ label should be preferred or even required?
++ properties:
++ matchNodePurpose:
++ enum: [ignore, prefer, require]
++ description: |
++ Decide if core pods *ignore*, *prefer* or *require* to
++ schedule on nodes with this label:
++ ```
++ hub.jupyter.org/node-purpose=core
++ ```
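++
++ For example, a sketch preferring such nodes:
++
++ ```yaml
++ scheduling:
++   corePods:
++     nodeAffinity:
++       matchNodePurpose: prefer
++ ```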
++ userPods:
++ type: object
++ additionalProperties: false
++ description: |
++ These settings influence all pods considered user pods, namely:
++
++ - user-placeholder
++ - hook-image-puller
++ - continuous-image-puller
++ - jupyter-<username>
++
++ By default, the tolerations are:
++
++ - hub.jupyter.org/dedicated=user:NoSchedule
++ - hub.jupyter.org_dedicated=user:NoSchedule
++
++ Note that tolerations set here are combined with the respective
++ component's dedicated tolerations, and that `_` is available in case
++ `/` isn't allowed in the cloud's tolerations.
++ properties:
++ tolerations: *tolerations-spec
++ nodeAffinity:
++ type: object
++ additionalProperties: false
++ description: |
++ Where should pods be scheduled? Perhaps nodes with a certain
++ label should be preferred or even required?
++ properties:
++ matchNodePurpose:
++ enum: [ignore, prefer, require]
++ description: |
++ Decide if user pods *ignore*, *prefer* or *require* to
++ schedule on nodes with this label:
++ ```
++ hub.jupyter.org/node-purpose=user
++ ```
++
++ ingress:
++ type: object
++ additionalProperties: false
++ required: [enabled]
++ properties:
++ enabled:
++ type: boolean
++ description: |
++ Enable the creation of a Kubernetes Ingress to the proxy-public service.
++
++ See [Advanced Topics — Zero to JupyterHub with Kubernetes
++ 0.7.0 documentation](ingress)
++ for more details.
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Annotations to apply to the Ingress resource.
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
++ for more details about annotations.
++ ingressClassName:
++ type: [string, "null"]
++ description: |
++          Maps directly to the Ingress resource's `spec.ingressClassName`.
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class)
++ for more details.
++ hosts:
++ type: array
++ description: |
++ List of hosts to route requests to the proxy.
++ pathSuffix:
++ type: [string, "null"]
++ description: |
++ Suffix added to Ingress's routing path pattern.
++
++ Specify `*` if your ingress matches path by glob pattern.
++ pathType:
++ enum: [Prefix, Exact, ImplementationSpecific]
++ description: |
++ The path type to use. The default value is 'Prefix'.
++
++ See [the Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types)
++ for more details about path types.
++ tls:
++ type: array
++ description: |
++ TLS configurations for Ingress.
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls)
++          for more details about TLS.
++
++ prePuller:
++ type: object
++ additionalProperties: false
++ required: [hook, continuous]
++ properties:
++ revisionHistoryLimit: *revisionHistoryLimit
++ labels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++          Extra labels to add to the pre-puller job pods.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++ to learn more about labels.
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++          Annotations to apply to the hook and continuous image puller pods. One example use case is to
++ disable istio sidecars which could interfere with the image pulling.
++ resources:
++ type: object
++ additionalProperties: true
++ description: |
++ These are standard Kubernetes resources with requests and limits for
++ cpu and memory. They will be used on the containers in the pods
++          pulling images. These should be set extremely low as the containers
++          either shut down directly or run a pause container that just idles.
++
++ They were made configurable as usage of ResourceQuota may require
++ containers in the namespace to have explicit resources set.
++ extraTolerations: *tolerations-spec
++ hook:
++ type: object
++ additionalProperties: false
++ required: [enabled]
++ description: |
++ See the [*optimization
++ section*](pulling-images-before-users-arrive)
++ for more details.
++ properties:
++ enabled:
++ type: boolean
++ pullOnlyOnChanges:
++ type: boolean
++ description: |
++ Pull only if changes have been made to the images to pull, or more
++ accurately if the hook-image-puller daemonset has changed in any
++ way.
++ podSchedulingWaitDuration:
++ description: |
++            The `hook-image-awaiter` has a criterion to await all the
++            `hook-image-puller` DaemonSet's pods to both schedule and finish
++            their image pulling. This flag can be used to relax this criterion
++            to instead only await the pods that _have already been scheduled_ to
++            finish image pulling after a certain duration.
++
++            This is useful because sometimes the newly created
++            `hook-image-puller` pods cannot be scheduled since the nodes are
++            full, in which case it probably doesn't make sense to block a
++            `helm upgrade`.
++
++ An infinite duration to wait for pods to schedule can be
++ represented by `-1`. This was the default behavior of version
++ 0.9.0 and earlier.
++ type: integer
++ nodeSelector: *nodeSelector-spec
++ tolerations: *tolerations-spec
++ containerSecurityContext: *containerSecurityContext-spec
++ image: *image-spec
++ resources: *resources-spec
++ serviceAccount: *serviceAccount
++ continuous:
++ type: object
++ additionalProperties: false
++ required: [enabled]
++ description: |
++ See the [*optimization
++ section*](pulling-images-before-users-arrive)
++ for more details.
++
++ ```{note}
++ If used with a Cluster Autoscaler (an autoscaling node pool), also add
++ user-placeholders and enable pod priority.
++ ```
++ properties:
++ enabled:
++ type: boolean
++ pullProfileListImages:
++ type: boolean
++ description: |
++ The singleuser.profileList configuration can provide a selection of
++ images. This option determines if all images identified there should
++ be pulled, both by the hook and continuous pullers.
++
++ Images are looked for under `kubespawner_override`, and also
++ `profile_options.choices.kubespawner_override` since version 3.2.0.
++
++            The reason to disable this is that if you have, for example, 10
++            images which start pulling in order from 1 to 10, a user who
++            arrives and wants to start a pod with image number 10 will need to
++            wait for all images to be pulled. In that case it may be preferable
++            to just let the arriving user wait for a single image to be pulled
++            on arrival.
++ extraImages:
++ type: object
++ additionalProperties: false
++ description: |
++ See the [*optimization section*](images-that-will-be-pulled) for more
++ details.
++
++ ```yaml
++ prePuller:
++ extraImages:
++ my-extra-image-i-want-pulled:
++ name: jupyter/all-spark-notebook
++ tag: 2343e33dec46
++ ```
++ patternProperties:
++ ".*":
++ type: object
++ additionalProperties: false
++ required: [name, tag]
++ properties:
++ name:
++ type: string
++ tag:
++ type: string
++ containerSecurityContext: *containerSecurityContext-spec
++ pause:
++ type: object
++ additionalProperties: false
++ description: |
++          The image-puller pods rely on init containers to pull all images;
++          once those are done, the pod's actual container just runs a `pause`
++          image. These are the settings for that pause container.
++ properties:
++ containerSecurityContext: *containerSecurityContext-spec
++ image: *image-spec
++
++ custom:
++ type: object
++ additionalProperties: true
++ description: |
++ Additional values to pass to the Hub.
++ JupyterHub will not itself look at these,
++ but you can read values in your own custom config via `hub.extraConfig`.
++ For example:
++
++ ```yaml
++ custom:
++ myHost: "https://example.horse"
++ hub:
++ extraConfig:
++ myConfig.py: |
++ c.MyAuthenticator.host = get_config("custom.myHost")
++ ```
++
++ cull:
++ type: object
++ additionalProperties: false
++ required: [enabled]
++ description: |
++ The
++ [jupyterhub-idle-culler](https://github.com/jupyterhub/jupyterhub-idle-culler)
++ can run as a JupyterHub managed service to _cull_ running servers.
++ properties:
++ enabled:
++ type: boolean
++ description: |
++          Enable/disable use of jupyterhub-idle-culler.
++ users:
++ type: [boolean, "null"]
++ description: See the `--cull-users` flag.
++ adminUsers:
++ type: [boolean, "null"]
++ description: See the `--cull-admin-users` flag.
++ removeNamedServers:
++ type: [boolean, "null"]
++ description: See the `--remove-named-servers` flag.
++ timeout:
++ type: [integer, "null"]
++ description: See the `--timeout` flag.
++ every:
++ type: [integer, "null"]
++ description: See the `--cull-every` flag.
++ concurrency:
++ type: [integer, "null"]
++ description: See the `--concurrency` flag.
++ maxAge:
++ type: [integer, "null"]
++ description: See the `--max-age` flag.
++
++ debug:
++ type: object
++ additionalProperties: false
++ required: [enabled]
++ properties:
++ enabled:
++ type: boolean
++ description: |
++ Increases the loglevel throughout the resources in the Helm chart.
++
++ rbac:
++ type: object
++ additionalProperties: false
++ required: [create]
++ properties:
++ enabled:
++ type: boolean
++ # This schema entry is needed to help us print a more helpful error
++ # message in NOTES.txt if hub.fsGid is set.
++ #
++ description: |
++ ````{note}
++ Removed in version 2.0.0. If you have been using `rbac.enable=false`
++ (strongly discouraged), then the equivalent configuration would be:
++
++ ```yaml
++ rbac:
++ create: false
++ hub:
++ serviceAccount:
++ create: false
++ proxy:
++ traefik:
++ serviceAccount:
++ create: false
++ scheduling:
++ userScheduler:
++ serviceAccount:
++ create: false
++ prePuller:
++ hook:
++ serviceAccount:
++ create: false
++ ```
++ ````
++ create:
++ type: boolean
++ description: |
++ Decides if (Cluster)Role and (Cluster)RoleBinding resources are
++ created and bound to the configured serviceAccounts.
++
++ global:
++ type: object
++ additionalProperties: true
++ properties:
++ safeToShowValues:
++ type: boolean
++ description: |
++          A flag that should only be set to true temporarily when experiencing a
++          deprecation message that contains censored content that you wish to
++          reveal.
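The scheduling and pre-pulling options documented above come together in a
user-supplied values file. A minimal sketch (the chosen values are illustrative
choices, not chart defaults):

```yaml
scheduling:
  corePods:
    nodeAffinity:
      # only schedule core pods on nodes labeled hub.jupyter.org/node-purpose=core
      matchNodePurpose: require
  userPods:
    nodeAffinity:
      matchNodePurpose: prefer
prePuller:
  extraImages:
    my-extra-image-i-want-pulled:
      name: jupyter/all-spark-notebook
      tag: 2343e33dec46
```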
+diff --git a/applications/jupyterhub/deploy/values.yaml b/applications/jupyterhub/deploy/values.yaml
+index 2f5cbca..41e108d 100755
+--- a/applications/jupyterhub/deploy/values.yaml
++++ b/applications/jupyterhub/deploy/values.yaml
+@@ -1,4 +1,4 @@
+-harness:
++harness: # EDIT: CLOUDHARNESS
+ subdomain: hub
+ service:
+ auto: false
+@@ -31,6 +31,11 @@ harness:
+ fullnameOverride: ""
+ nameOverride:
+
+# enabled is ignored by the jupyterhub chart itself, but a chart that depends
+# on the jupyterhub chart conditionally can make use of this config option as
+# the condition.
++enabled:
++
+ # custom can contain anything you want to pass to the hub pod, as all passed
+ # Helm template values will be made available there.
+ custom: {}
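As a sketch of how a depending chart could use the `enabled` flag introduced
above as its condition (the parent chart layout, version, and repository are
assumptions for illustration):

```yaml
# Hypothetical parent chart's Chart.yaml
dependencies:
  - name: jupyterhub
    version: "3.2.1"
    repository: https://hub.jupyter.org/helm-chart/
    condition: jupyterhub.enabled
```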
+@@ -54,10 +59,11 @@ imagePullSecrets: []
+ # ConfigurableHTTPProxy speaks with the actual ConfigurableHTTPProxy server in
+ # the proxy pod.
+ hub:
++ revisionHistoryLimit:
+ config:
+ JupyterHub:
+ admin_access: true
+- authenticator_class: keycloak
++ authenticator_class: keycloak # EDIT: CLOUDHARNESS
+ service:
+ type: ClusterIP
+ annotations: {}
+@@ -68,7 +74,6 @@ hub:
+ baseUrl: /
+ cookieSecret:
+ initContainers: []
+- fsGid: 1000
+ nodeSelector: {}
+ tolerations: []
+ concurrentSpawnLimit: 64
+@@ -106,37 +111,38 @@ hub:
+ extraVolumes: []
+ extraVolumeMounts: []
+ image:
+- name: jupyterhub/k8s-hub
+- tag: "1.1.3"
++ name: quay.io/jupyterhub/k8s-hub
++ tag: "3.2.1"
+ pullPolicy:
+ pullSecrets: []
+ resources: {}
++ podSecurityContext:
++ fsGroup: 1000
+ containerSecurityContext:
+ runAsUser: 1000
+ runAsGroup: 1000
+ allowPrivilegeEscalation: false
+ lifecycle: {}
++ loadRoles: {}
+ services: {}
+ pdb:
+ enabled: false
+ maxUnavailable:
+ minAvailable: 1
+ networkPolicy:
+- enabled: false
++ enabled: true
+ ingress: []
+- ## egress for JupyterHub already includes Kubernetes internal DNS and
+- ## access to the proxy, but can be restricted further, but ensure to allow
+- ## access to the Kubernetes API server that couldn't be pinned ahead of
+- ## time.
+- ##
+- ## ref: https://stackoverflow.com/a/59016417/2220152
+- egress:
+- - to:
+- - ipBlock:
+- cidr: 0.0.0.0/0
++ egress: []
++ egressAllowRules:
++ cloudMetadataServer: true
++ dnsPortsCloudMetadataServer: true
++ dnsPortsKubeSystemNamespace: true
++ dnsPortsPrivateIPs: true
++ nonPrivateIPs: true
++ privateIPs: true
+ interNamespaceAccessLabels: ignore
+ allowedIngressPorts: []
+- allowNamedServers: true
++ allowNamedServers: true # EDIT: CLOUDHARNESS
+ namedServerLimitPerUser:
+ authenticatePrometheus:
+ redirectToServer:
+@@ -163,11 +169,13 @@ hub:
+ timeoutSeconds: 1
+ existingSecret:
+ serviceAccount:
++ create: true
++ name:
+ annotations: {}
+ extraPodSpec: {}
+
+ rbac:
+- enabled: true
++ create: true
+
+ # proxy relates to the proxy pod, the proxy-public service, and the autohttps
+ # pod and proxy-http service.
+@@ -202,7 +210,7 @@ proxy:
+ rollingUpdate:
+ # service relates to the proxy-public service
+ service:
+- type: NodePort
++ type: NodePort # EDIT: CLOUDHARNESS
+ labels: {}
+ annotations: {}
+ nodePorts:
+@@ -215,13 +223,17 @@ proxy:
+ # chp relates to the proxy pod, which is responsible for routing traffic based
+ # on dynamic configuration sent from JupyterHub to CHP's REST API.
+ chp:
++ revisionHistoryLimit:
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ image:
+- name: jupyterhub/configurable-http-proxy
+- tag: 4.5.0 # https://github.com/jupyterhub/configurable-http-proxy/releases
++ name: quay.io/jupyterhub/configurable-http-proxy
++ # tag is automatically bumped to new patch versions by the
++ # watch-dependencies.yaml workflow.
++ #
++ tag: "4.6.1" # https://github.com/jupyterhub/configurable-http-proxy/tags
+ pullPolicy:
+ pullSecrets: []
+ extraCommandLineFlags: []
+@@ -229,11 +241,14 @@ proxy:
+ enabled: true
+ initialDelaySeconds: 60
+ periodSeconds: 10
++ failureThreshold: 30
++ timeoutSeconds: 3
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 0
+ periodSeconds: 2
+ failureThreshold: 1000
++ timeoutSeconds: 1
+ resources: {}
+ defaultTarget:
+ errorTarget:
+@@ -241,12 +256,16 @@ proxy:
+ nodeSelector: {}
+ tolerations: []
+ networkPolicy:
+- enabled: false
++ enabled: true
+ ingress: []
+- egress:
+- - to:
+- - ipBlock:
+- cidr: 0.0.0.0/0
++ egress: []
++ egressAllowRules:
++ cloudMetadataServer: true
++ dnsPortsCloudMetadataServer: true
++ dnsPortsKubeSystemNamespace: true
++ dnsPortsPrivateIPs: true
++ nonPrivateIPs: true
++ privateIPs: true
+ interNamespaceAccessLabels: ignore
+ allowedIngressPorts: [http, https]
+ pdb:
+@@ -257,13 +276,17 @@ proxy:
+ # traefik relates to the autohttps pod, which is responsible for TLS
+ # termination when proxy.https.type=letsencrypt.
+ traefik:
++ revisionHistoryLimit:
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ image:
+ name: traefik
+- tag: v2.4.11 # ref: https://hub.docker.com/_/traefik?tab=tags
++ # tag is automatically bumped to new patch versions by the
++ # watch-dependencies.yaml workflow.
++ #
++ tag: "v2.10.7" # ref: https://hub.docker.com/_/traefik?tab=tags
+ pullPolicy:
+ pullSecrets: []
+ hsts:
+@@ -272,6 +295,7 @@ proxy:
+ maxAge: 15724800 # About 6 months
+ resources: {}
+ labels: {}
++ extraInitContainers: []
+ extraEnv: {}
+ extraVolumes: []
+ extraVolumeMounts: []
+@@ -283,10 +307,14 @@ proxy:
+ networkPolicy:
+ enabled: true
+ ingress: []
+- egress:
+- - to:
+- - ipBlock:
+- cidr: 0.0.0.0/0
++ egress: []
++ egressAllowRules:
++ cloudMetadataServer: true
++ dnsPortsCloudMetadataServer: true
++ dnsPortsKubeSystemNamespace: true
++ dnsPortsPrivateIPs: true
++ nonPrivateIPs: true
++ privateIPs: true
+ interNamespaceAccessLabels: ignore
+ allowedIngressPorts: [http, https]
+ pdb:
+@@ -294,6 +322,8 @@ proxy:
+ maxUnavailable:
+ minAvailable: 1
+ serviceAccount:
++ create: true
++ name:
+ annotations: {}
+ extraPodSpec: {}
+ secretSync:
+@@ -302,8 +332,8 @@ proxy:
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ image:
+- name: jupyterhub/k8s-secret-sync
+- tag: "1.1.3"
++ name: quay.io/jupyterhub/k8s-secret-sync
++ tag: "3.2.1"
+ pullPolicy:
+ pullSecrets: []
+ resources: {}
+@@ -342,29 +372,27 @@ singleuser:
+ preferred: []
+ networkTools:
+ image:
+- name: jupyterhub/k8s-network-tools
+- tag: "1.1.3"
++ name: quay.io/jupyterhub/k8s-network-tools
++ tag: "3.2.1"
+ pullPolicy:
+ pullSecrets: []
++ resources: {}
+ cloudMetadata:
+ # block set to true will append a privileged initContainer using the
+ # iptables to block the sensitive metadata server at the provided ip.
+- blockWithIptables: false
++ blockWithIptables: true
++ ip: 169.254.169.254
+ networkPolicy:
+- enabled: false
++ enabled: true
+ ingress: []
+- egress:
+- # Required egress to communicate with the hub and DNS servers will be
+- # augmented to these egress rules.
+- #
+- # This default rule explicitly allows all outbound traffic from singleuser
+- # pods, except to a typical IP used to return metadata that can be used by
+- # someone with malicious intent.
+- - to:
+- - ipBlock:
+- cidr: 0.0.0.0/0
+- except:
+- - 169.254.169.254/32
++ egress: []
++ egressAllowRules:
++ cloudMetadataServer: false
++ dnsPortsCloudMetadataServer: true
++ dnsPortsKubeSystemNamespace: true
++ dnsPortsPrivateIPs: true
++ nonPrivateIPs: true
++ privateIPs: false
+ interNamespaceAccessLabels: ignore
+ allowedIngressPorts: []
+ events: true
+@@ -376,6 +404,7 @@ singleuser:
+ lifecycleHooks: {}
+ initContainers: []
+ extraContainers: []
++ allowPrivilegeEscalation: false
+ uid: 1000
+ fsGid: 100
+ serviceAccountName:
+@@ -387,29 +416,29 @@ singleuser:
+ static:
+ pvcName:
+ subPath: "{username}"
+- capacity: 10Mi
+- homeMountPath: /home/workspace
++ capacity: 10Mi # EDIT: CLOUDHARNESS
++ homeMountPath: /home/workspace # EDIT: CLOUDHARNESS
+ dynamic:
+ storageClass:
+- pvcNameTemplate: jupyter-{username}
+- volumeNameTemplate: jupyter-{username}
++ pvcNameTemplate: jupyter-{username} # EDIT: CLOUDHARNESS
++ volumeNameTemplate: jupyter-{username} # EDIT: CLOUDHARNESS
+ storageAccessModes: [ReadWriteOnce]
+ image:
+- name: jupyter/base-notebook
+- tag: "hub-1.4.2"
++ name: quay.io/jupyterhub/k8s-singleuser-sample
++ tag: "3.2.1"
+ pullPolicy:
+ pullSecrets: []
+ startTimeout: 300
+ cpu:
+- limit: 0.4
+- guarantee: 0.05
++ limit: 0.4 # EDIT: CLOUDHARNESS
++ guarantee: 0.05 # EDIT: CLOUDHARNESS
+ memory:
+- limit: 0.5G
+- guarantee: 0.1G
++ limit: 0.5G # EDIT: CLOUDHARNESS
++ guarantee: 0.1G # EDIT: CLOUDHARNESS
+ extraResource:
+ limits: {}
+ guarantees: {}
+- cmd: /usr/local/bin/start-singleuser.sh
++ cmd: jupyterhub-singleuser
+ defaultUrl:
+ extraPodConfig: {}
+ profileList: []
+@@ -417,74 +446,146 @@ singleuser:
+ # scheduling relates to the user-scheduler pods and user-placeholder pods.
+ scheduling:
+ userScheduler:
+- enabled: false
++ enabled: false # EDIT: CLOUDHARNESS
++ revisionHistoryLimit:
+ replicas: 2
+ logLevel: 4
++  # plugins are configured on the user-scheduler to score nodes so that user
++  # pods are packed onto the most busy node, which helps us scale down more
++  # effectively. It isn't obvious how to enable/disable scoring plugins, and
++  # how to configure them, to accomplish this.
++ #
+ # plugins ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins-1
++ # migration ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduler-configuration-migrations
++ #
+ plugins:
+ score:
++ # These scoring plugins are enabled by default according to
++ # https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins
++ # 2022-02-22.
++ #
++ # Enabled with high priority:
++ # - NodeAffinity
++ # - InterPodAffinity
++ # - NodeResourcesFit
++ # - ImageLocality
++ # Remains enabled with low default priority:
++ # - TaintToleration
++ # - PodTopologySpread
++ # - VolumeBinding
++ # Disabled for scoring:
++ # - NodeResourcesBalancedAllocation
++ #
+ disabled:
+- - name: SelectorSpread
+- - name: TaintToleration
+- - name: PodTopologySpread
++ # We disable these plugins (with regards to scoring) to not interfere
++ # or complicate our use of NodeResourcesFit.
+ - name: NodeResourcesBalancedAllocation
+- - name: NodeResourcesLeastAllocated
+ # Disable plugins to be allowed to enable them again with a different
+ # weight and avoid an error.
+- - name: NodePreferAvoidPods
+ - name: NodeAffinity
+ - name: InterPodAffinity
++ - name: NodeResourcesFit
+ - name: ImageLocality
+ enabled:
+- - name: NodePreferAvoidPods
+- weight: 161051
+ - name: NodeAffinity
+ weight: 14631
+ - name: InterPodAffinity
+ weight: 1331
+- - name: NodeResourcesMostAllocated
++ - name: NodeResourcesFit
+ weight: 121
+ - name: ImageLocality
+ weight: 11
++ pluginConfig:
++ # Here we declare that we should optimize pods to fit based on a
++ # MostAllocated strategy instead of the default LeastAllocated.
++ - name: NodeResourcesFit
++ args:
++ scoringStrategy:
++ resources:
++ - name: cpu
++ weight: 1
++ - name: memory
++ weight: 1
++ type: MostAllocated
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ image:
+ # IMPORTANT: Bumping the minor version of this binary should go hand in
+- # hand with an inspection of the user-scheduelrs RBAC resources
+- # that we have forked.
+- name: k8s.gcr.io/kube-scheduler
+- tag: v1.19.13 # ref: https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md
++  # hand with an inspection of the user-scheduler's RBAC
++ # resources that we have forked in
++ # templates/scheduling/user-scheduler/rbac.yaml.
++ #
++ # Debugging advice:
++ #
++ # - Is configuration of kube-scheduler broken in
++ # templates/scheduling/user-scheduler/configmap.yaml?
++ #
++  # - Is the kube-scheduler binary incompatible with a k8s
++  # api-server that is too new or too old?
++ #
++ # - You can update the GitHub workflow that runs tests to
++ # include "deploy/user-scheduler" in the k8s namespace report
++ # and reduce the user-scheduler deployments replicas to 1 in
++ # dev-config.yaml to get relevant logs from the user-scheduler
++ # pods. Inspect the "Kubernetes namespace report" action!
++ #
++  # - Typical failures are that kube-scheduler fails to look up
++  # resources via its "informers" and won't start trying to
++  # schedule pods until those lookups succeed; this may require
++  # additional RBAC permissions, or require that the k8s api-server
++  # is aware of the resources.
++ #
++ # - If "successfully acquired lease" can be seen in the logs, it
++ # is a good sign kube-scheduler is ready to schedule pods.
++ #
++ name: registry.k8s.io/kube-scheduler
++ # tag is automatically bumped to new patch versions by the
++ # watch-dependencies.yaml workflow. The minor version is pinned in the
++ # workflow, and should be updated there if a minor version bump is done
++ # here. We aim to stay around 1 minor version behind the latest k8s
++ # version.
++ #
++ tag: "v1.28.6" # ref: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
+ pullPolicy:
+ pullSecrets: []
+ nodeSelector: {}
+ tolerations: []
++ labels: {}
++ annotations: {}
+ pdb:
+ enabled: true
+ maxUnavailable: 1
+ minAvailable:
+ resources: {}
+ serviceAccount:
++ create: true
++ name:
+ annotations: {}
+ extraPodSpec: {}
+ podPriority:
+ enabled: false
+ globalDefault: false
+ defaultPriority: 0
++ imagePullerPriority: -5
+ userPlaceholderPriority: -10
+ userPlaceholder:
+ enabled: true
+ image:
+- name: k8s.gcr.io/pause
+- # tag's can be updated by inspecting the output of the command:
+- # gcloud container images list-tags k8s.gcr.io/pause --sort-by=~tags
++ name: registry.k8s.io/pause
++ # tag is automatically bumped to new patch versions by the
++ # watch-dependencies.yaml workflow.
+ #
+ # If you update this, also update prePuller.pause.image.tag
+- tag: "3.5"
++ #
++ tag: "3.9"
+ pullPolicy:
+ pullSecrets: []
++ revisionHistoryLimit:
+ replicas: 0
++ labels: {}
++ annotations: {}
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+@@ -517,6 +618,8 @@ scheduling:
+
+ # prePuller relates to the hook|continuous-image-puller DaemonSets
+ prePuller:
++ revisionHistoryLimit:
++ labels: {}
+ annotations: {}
+ resources: {}
+ containerSecurityContext:
+@@ -530,8 +633,8 @@ prePuller:
+ pullOnlyOnChanges: true
+ # image and the configuration below relates to the hook-image-awaiter Job
+ image:
+- name: jupyterhub/k8s-image-awaiter
+- tag: "1.1.3"
++ name: quay.io/jupyterhub/k8s-image-awaiter
++ tag: "3.2.1"
+ pullPolicy:
+ pullSecrets: []
+ containerSecurityContext:
+@@ -543,6 +646,8 @@ prePuller:
+ tolerations: []
+ resources: {}
+ serviceAccount:
++ create: true
++ name:
+ annotations: {}
+ continuous:
+ enabled: true
+@@ -554,18 +659,20 @@ prePuller:
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ image:
+- name: k8s.gcr.io/pause
+- # tag's can be updated by inspecting the output of the command:
+- # gcloud container images list-tags k8s.gcr.io/pause --sort-by=~tags
++ name: registry.k8s.io/pause
++ # tag is automatically bumped to new patch versions by the
++ # watch-dependencies.yaml workflow.
+ #
+ # If you update this, also update scheduling.userPlaceholder.image.tag
+- tag: "3.5"
++ #
++ tag: "3.9"
+ pullPolicy:
+ pullSecrets: []
+
+ ingress:
+ enabled: false
+ annotations: {}
++ ingressClassName:
+ hosts: []
+ pathSuffix:
+ pathType: Prefix
+@@ -581,7 +688,8 @@ ingress:
+ cull:
+ enabled: true
+ users: false # --cull-users
+- removeNamedServers: true # --remove-named-servers
++ adminUsers: true # --cull-admin-users
++ removeNamedServers: true # EDIT: CLOUDHARNESS
+ timeout: 3600 # --timeout
+ every: 600 # --cull-every
+ concurrency: 10 # --concurrency
+diff --git a/applications/jupyterhub/zero-to-jupyterhub-k8s b/applications/jupyterhub/zero-to-jupyterhub-k8s
+new file mode 160000
+index 0000000..c92c123
+--- /dev/null
++++ b/applications/jupyterhub/zero-to-jupyterhub-k8s
+@@ -0,0 +1 @@
++Subproject commit c92c12374795e84f36f5f16c4e8b8a448ad2f230-dirty
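The `-dirty` suffix on the recorded gitlink means the submodule's working tree
contained local modifications when this diff was generated. A sketch of the
commands that typically produce such an entry (the upstream URL is assumed
from context):

```bash
git submodule add https://github.com/jupyterhub/zero-to-jupyterhub-k8s.git \
    applications/jupyterhub/zero-to-jupyterhub-k8s
git -C applications/jupyterhub/zero-to-jupyterhub-k8s checkout c92c12374795
git add .gitmodules applications/jupyterhub/zero-to-jupyterhub-k8s
```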
diff --git a/applications/jupyterhub/update.sh b/applications/jupyterhub/update.sh
new file mode 100644
index 00000000..cddf6899
--- /dev/null
+++ b/applications/jupyterhub/update.sh
@@ -0,0 +1,28 @@
+# Clone without checking out files, then materialize only what chartpress needs.
+git clone -n git@github.com:jupyterhub/zero-to-jupyterhub-k8s.git
+pip install chartpress
+cd zero-to-jupyterhub-k8s
+git checkout jupyterhub
+git checkout chartpress.yaml
+chartpress -t "$1"
+cd ..
+cp -R zero-to-jupyterhub-k8s/jupyterhub/templates/* deploy/templates
+cp zero-to-jupyterhub-k8s/jupyterhub/files/hub/* deploy/resources/hub
+cp zero-to-jupyterhub-k8s/jupyterhub/values* deploy
+cd deploy
+
+rm -Rf templates/proxy/autohttps # Proxy is not used as node balancer
+rm templates/ingress.yaml # Default cloudharness ingress is used
+# Replace references to files/hub/ inside deploy/templates with resources/jupyterhub/hub/
+find templates -type f -exec sed -i 's/files\/hub/resources\/jupyterhub\/hub/g' {} \;
+
+# Replace .Values. with .Values.apps.jupyterhub. so template references resolve under the CloudHarness app scope
+find templates -type f -exec sed -i 's/.Values./.Values.apps.jupyterhub./g' {} \;
+
+# Replace the hub image name:tag pair with the single CloudHarness deployment image reference
+find templates -type f -exec sed -i 's/{{ .Values.apps.jupyterhub.hub.image.name }}:{{ .Values.apps.jupyterhub.hub.image.tag }}/{{ .Values.apps.jupyterhub.harness.deployment.image }}/g' {} \;
+
+
+
+find templates -type f -exec sed -i 's$.Template.BasePath "/hub$.Template.BasePath "/jupyterhub/hub$g' {} \;
+find templates -type f -exec sed -i 's$.Template.BasePath "/proxy$.Template.BasePath "/jupyterhub/proxy$g' {} \;
+find templates -type f -exec sed -i 's$.Template.BasePath "/scheduling$.Template.BasePath "/jupyterhub/scheduling$g' {} \;
diff --git a/deployment/codefresh-test-local.yaml b/deployment/codefresh-test-local.yaml
index 612e214b..19a91c83 100644
--- a/deployment/codefresh-test-local.yaml
+++ b/deployment/codefresh-test-local.yaml
@@ -32,9 +32,8 @@ steps:
working_directory: .
commands:
- bash cloud-harness/install.sh
- - harness-deployment . -n test-${{NAMESPACE_BASENAME}}-${{CF_SHORT_REVISION}}
- -d ${{CF_SHORT_REVISION}}.${{DOMAIN}} -r ${{REGISTRY}} -rs ${{REGISTRY_SECRET}}
- -e test-local --write-env -N -i samples
+ - harness-deployment . -n test-${{NAMESPACE_BASENAME}} -d ${{DOMAIN}} -r ${{REGISTRY}}
+ -rs ${{REGISTRY_SECRET}} -e test-local --write-env -N -i jupyterhub
- cat deployment/.env >> ${{CF_VOLUME_PATH}}/env_vars_to_export
- cat ${{CF_VOLUME_PATH}}/env_vars_to_export
prepare_deployment_view:
@@ -72,33 +71,11 @@ steps:
== true
forceNoCache: includes('${{CLOUDHARNESS_BASE_TAG_FORCE_BUILD}}', '{{CLOUDHARNESS_BASE_TAG_FORCE_BUILD}}')
== false
- cloudharness-frontend-build:
- type: build
- stage: build
- dockerfile: infrastructure/base-images/cloudharness-frontend-build/Dockerfile
- registry: '${{CODEFRESH_REGISTRY}}'
- buildkit: true
- build_arguments:
- - DOMAIN=${{DOMAIN}}
- - NOCACHE=${{CF_BUILD_ID}}
- - REGISTRY=${{REGISTRY}}/cloudharness/
- image_name: cloudharness/cloudharness-frontend-build
- title: Cloudharness frontend build
- working_directory: ./.
- tag: '${{CLOUDHARNESS_FRONTEND_BUILD_TAG}}'
- when:
- condition:
- any:
- buildDoesNotExist: includes('${{CLOUDHARNESS_FRONTEND_BUILD_TAG_EXISTS}}',
- '{{CLOUDHARNESS_FRONTEND_BUILD_TAG_EXISTS}}') == true
- forceNoCache: includes('${{CLOUDHARNESS_FRONTEND_BUILD_TAG_FORCE_BUILD}}',
- '{{CLOUDHARNESS_FRONTEND_BUILD_TAG_FORCE_BUILD}}') == false
- build_static_images:
- title: Build static images
+ build_application_images:
type: parallel
stage: build
steps:
- cloudharness-flask:
+ accounts:
type: build
stage: build
dockerfile: Dockerfile
@@ -108,23 +85,18 @@ steps:
- DOMAIN=${{DOMAIN}}
- NOCACHE=${{CF_BUILD_ID}}
- REGISTRY=${{REGISTRY}}/cloudharness/
- - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}}
- image_name: cloudharness/cloudharness-flask
- title: Cloudharness flask
- working_directory: ./infrastructure/common-images/cloudharness-flask
- tag: '${{CLOUDHARNESS_FLASK_TAG}}'
+ image_name: cloudharness/accounts
+ title: Accounts
+ working_directory: ./applications/accounts
+ tag: '${{ACCOUNTS_TAG}}'
when:
condition:
any:
- buildDoesNotExist: includes('${{CLOUDHARNESS_FLASK_TAG_EXISTS}}', '{{CLOUDHARNESS_FLASK_TAG_EXISTS}}')
+ buildDoesNotExist: includes('${{ACCOUNTS_TAG_EXISTS}}', '{{ACCOUNTS_TAG_EXISTS}}')
== true
- forceNoCache: includes('${{CLOUDHARNESS_FLASK_TAG_FORCE_BUILD}}', '{{CLOUDHARNESS_FLASK_TAG_FORCE_BUILD}}')
+ forceNoCache: includes('${{ACCOUNTS_TAG_FORCE_BUILD}}', '{{ACCOUNTS_TAG_FORCE_BUILD}}')
== false
- build_application_images:
- type: parallel
- stage: build
- steps:
- nfsserver:
+ jupyterhub:
type: build
stage: build
dockerfile: Dockerfile
@@ -134,18 +106,19 @@ steps:
- DOMAIN=${{DOMAIN}}
- NOCACHE=${{CF_BUILD_ID}}
- REGISTRY=${{REGISTRY}}/cloudharness/
- image_name: cloudharness/nfsserver
- title: Nfsserver
- working_directory: ./applications/nfsserver
- tag: '${{NFSSERVER_TAG}}'
+ - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}}
+ image_name: cloudharness/jupyterhub
+ title: Jupyterhub
+ working_directory: ./applications/jupyterhub
+ tag: '${{JUPYTERHUB_TAG}}'
when:
condition:
any:
- buildDoesNotExist: includes('${{NFSSERVER_TAG_EXISTS}}', '{{NFSSERVER_TAG_EXISTS}}')
+ buildDoesNotExist: includes('${{JUPYTERHUB_TAG_EXISTS}}', '{{JUPYTERHUB_TAG_EXISTS}}')
== true
- forceNoCache: includes('${{NFSSERVER_TAG_FORCE_BUILD}}', '{{NFSSERVER_TAG_FORCE_BUILD}}')
+ forceNoCache: includes('${{JUPYTERHUB_TAG_FORCE_BUILD}}', '{{JUPYTERHUB_TAG_FORCE_BUILD}}')
== false
- accounts:
+ jupyterhub-zero-to-jupyterhub-k8s-images-secret-sync:
type: build
stage: build
dockerfile: Dockerfile
@@ -155,18 +128,20 @@ steps:
- DOMAIN=${{DOMAIN}}
- NOCACHE=${{CF_BUILD_ID}}
- REGISTRY=${{REGISTRY}}/cloudharness/
- image_name: cloudharness/accounts
- title: Accounts
- working_directory: ./applications/accounts
- tag: '${{ACCOUNTS_TAG}}'
+ image_name: cloudharness/jupyterhub-zero-to-jupyterhub-k8s-images-secret-sync
+ title: Jupyterhub zero to jupyterhub k8s images secret sync
+ working_directory: ./applications/jupyterhub/zero-to-jupyterhub-k8s/images/secret-sync
+ tag: '${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SECRET_SYNC_TAG}}'
when:
condition:
any:
- buildDoesNotExist: includes('${{ACCOUNTS_TAG_EXISTS}}', '{{ACCOUNTS_TAG_EXISTS}}')
+ buildDoesNotExist: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SECRET_SYNC_TAG_EXISTS}}',
+ '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SECRET_SYNC_TAG_EXISTS}}')
== true
- forceNoCache: includes('${{ACCOUNTS_TAG_FORCE_BUILD}}', '{{ACCOUNTS_TAG_FORCE_BUILD}}')
+ forceNoCache: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SECRET_SYNC_TAG_FORCE_BUILD}}',
+ '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SECRET_SYNC_TAG_FORCE_BUILD}}')
== false
- samples:
+ jupyterhub-zero-to-jupyterhub-k8s-images-image-awaiter:
type: build
stage: build
dockerfile: Dockerfile
@@ -176,20 +151,20 @@ steps:
- DOMAIN=${{DOMAIN}}
- NOCACHE=${{CF_BUILD_ID}}
- REGISTRY=${{REGISTRY}}/cloudharness/
- - CLOUDHARNESS_FRONTEND_BUILD=${{REGISTRY}}/cloudharness/cloudharness-frontend-build:${{CLOUDHARNESS_FRONTEND_BUILD_TAG}}
- - CLOUDHARNESS_FLASK=${{REGISTRY}}/cloudharness/cloudharness-flask:${{CLOUDHARNESS_FLASK_TAG}}
- image_name: cloudharness/samples
- title: Samples
- working_directory: ./applications/samples
- tag: '${{SAMPLES_TAG}}'
+ image_name: cloudharness/jupyterhub-zero-to-jupyterhub-k8s-images-image-awaiter
+ title: Jupyterhub zero to jupyterhub k8s images image awaiter
+ working_directory: ./applications/jupyterhub/zero-to-jupyterhub-k8s/images/image-awaiter
+ tag: '${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_IMAGE_AWAITER_TAG}}'
when:
condition:
any:
- buildDoesNotExist: includes('${{SAMPLES_TAG_EXISTS}}', '{{SAMPLES_TAG_EXISTS}}')
+ buildDoesNotExist: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_IMAGE_AWAITER_TAG_EXISTS}}',
+ '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_IMAGE_AWAITER_TAG_EXISTS}}')
== true
- forceNoCache: includes('${{SAMPLES_TAG_FORCE_BUILD}}', '{{SAMPLES_TAG_FORCE_BUILD}}')
+ forceNoCache: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_IMAGE_AWAITER_TAG_FORCE_BUILD}}',
+ '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_IMAGE_AWAITER_TAG_FORCE_BUILD}}')
== false
- samples-print-file:
+ jupyterhub-zero-to-jupyterhub-k8s-images-singleuser-sample:
type: build
stage: build
dockerfile: Dockerfile
@@ -199,19 +174,20 @@ steps:
- DOMAIN=${{DOMAIN}}
- NOCACHE=${{CF_BUILD_ID}}
- REGISTRY=${{REGISTRY}}/cloudharness/
- - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}}
- image_name: cloudharness/samples-print-file
- title: Samples print file
- working_directory: ./applications/samples/tasks/print-file
- tag: '${{SAMPLES_PRINT_FILE_TAG}}'
+ image_name: cloudharness/jupyterhub-zero-to-jupyterhub-k8s-images-singleuser-sample
+ title: Jupyterhub zero to jupyterhub k8s images singleuser sample
+ working_directory: ./applications/jupyterhub/zero-to-jupyterhub-k8s/images/singleuser-sample
+ tag: '${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SINGLEUSER_SAMPLE_TAG}}'
when:
condition:
any:
- buildDoesNotExist: includes('${{SAMPLES_PRINT_FILE_TAG_EXISTS}}', '{{SAMPLES_PRINT_FILE_TAG_EXISTS}}')
+ buildDoesNotExist: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SINGLEUSER_SAMPLE_TAG_EXISTS}}',
+ '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SINGLEUSER_SAMPLE_TAG_EXISTS}}')
== true
- forceNoCache: includes('${{SAMPLES_PRINT_FILE_TAG_FORCE_BUILD}}', '{{SAMPLES_PRINT_FILE_TAG_FORCE_BUILD}}')
+ forceNoCache: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SINGLEUSER_SAMPLE_TAG_FORCE_BUILD}}',
+ '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_SINGLEUSER_SAMPLE_TAG_FORCE_BUILD}}')
== false
- samples-secret:
+ jupyterhub-zero-to-jupyterhub-k8s-images-network-tools:
type: build
stage: build
dockerfile: Dockerfile
@@ -221,19 +197,20 @@ steps:
- DOMAIN=${{DOMAIN}}
- NOCACHE=${{CF_BUILD_ID}}
- REGISTRY=${{REGISTRY}}/cloudharness/
- - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}}
- image_name: cloudharness/samples-secret
- title: Samples secret
- working_directory: ./applications/samples/tasks/secret
- tag: '${{SAMPLES_SECRET_TAG}}'
+ image_name: cloudharness/jupyterhub-zero-to-jupyterhub-k8s-images-network-tools
+ title: Jupyterhub zero to jupyterhub k8s images network tools
+ working_directory: ./applications/jupyterhub/zero-to-jupyterhub-k8s/images/network-tools
+ tag: '${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_NETWORK_TOOLS_TAG}}'
when:
condition:
any:
- buildDoesNotExist: includes('${{SAMPLES_SECRET_TAG_EXISTS}}', '{{SAMPLES_SECRET_TAG_EXISTS}}')
+ buildDoesNotExist: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_NETWORK_TOOLS_TAG_EXISTS}}',
+ '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_NETWORK_TOOLS_TAG_EXISTS}}')
== true
- forceNoCache: includes('${{SAMPLES_SECRET_TAG_FORCE_BUILD}}', '{{SAMPLES_SECRET_TAG_FORCE_BUILD}}')
+ forceNoCache: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_NETWORK_TOOLS_TAG_FORCE_BUILD}}',
+ '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_NETWORK_TOOLS_TAG_FORCE_BUILD}}')
== false
- samples-sum:
+ jupyterhub-zero-to-jupyterhub-k8s-images-hub:
type: build
stage: build
dockerfile: Dockerfile
@@ -243,19 +220,20 @@ steps:
- DOMAIN=${{DOMAIN}}
- NOCACHE=${{CF_BUILD_ID}}
- REGISTRY=${{REGISTRY}}/cloudharness/
- - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}}
- image_name: cloudharness/samples-sum
- title: Samples sum
- working_directory: ./applications/samples/tasks/sum
- tag: '${{SAMPLES_SUM_TAG}}'
+ image_name: cloudharness/jupyterhub-zero-to-jupyterhub-k8s-images-hub
+ title: Jupyterhub zero to jupyterhub k8s images hub
+ working_directory: ./applications/jupyterhub/zero-to-jupyterhub-k8s/images/hub
+ tag: '${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_HUB_TAG}}'
when:
condition:
any:
- buildDoesNotExist: includes('${{SAMPLES_SUM_TAG_EXISTS}}', '{{SAMPLES_SUM_TAG_EXISTS}}')
- == true
- forceNoCache: includes('${{SAMPLES_SUM_TAG_FORCE_BUILD}}', '{{SAMPLES_SUM_TAG_FORCE_BUILD}}')
+ buildDoesNotExist: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_HUB_TAG_EXISTS}}',
+ '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_HUB_TAG_EXISTS}}') ==
+ true
+ forceNoCache: includes('${{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_HUB_TAG_FORCE_BUILD}}',
+ '{{JUPYTERHUB_ZERO_TO_JUPYTERHUB_K8S_IMAGES_HUB_TAG_FORCE_BUILD}}')
== false
- common:
+ jupyterhub-jupyterhub:
type: build
stage: build
dockerfile: Dockerfile
@@ -265,19 +243,18 @@ steps:
- DOMAIN=${{DOMAIN}}
- NOCACHE=${{CF_BUILD_ID}}
- REGISTRY=${{REGISTRY}}/cloudharness/
- - CLOUDHARNESS_FLASK=${{REGISTRY}}/cloudharness/cloudharness-flask:${{CLOUDHARNESS_FLASK_TAG}}
- image_name: cloudharness/common
- title: Common
- working_directory: ./applications/common/server
- tag: '${{COMMON_TAG}}'
+ image_name: cloudharness/jupyterhub-jupyterhub
+ title: Jupyterhub jupyterhub
+ working_directory: ./applications/jupyterhub/src/jupyterhub
+ tag: '${{JUPYTERHUB_JUPYTERHUB_TAG}}'
when:
condition:
any:
- buildDoesNotExist: includes('${{COMMON_TAG_EXISTS}}', '{{COMMON_TAG_EXISTS}}')
- == true
- forceNoCache: includes('${{COMMON_TAG_FORCE_BUILD}}', '{{COMMON_TAG_FORCE_BUILD}}')
- == false
- workflows-send-result-event:
+ buildDoesNotExist: includes('${{JUPYTERHUB_JUPYTERHUB_TAG_EXISTS}}',
+ '{{JUPYTERHUB_JUPYTERHUB_TAG_EXISTS}}') == true
+ forceNoCache: includes('${{JUPYTERHUB_JUPYTERHUB_TAG_FORCE_BUILD}}',
+ '{{JUPYTERHUB_JUPYTERHUB_TAG_FORCE_BUILD}}') == false
+ jupyterhub-jupyterhub-singleuser:
type: build
stage: build
dockerfile: Dockerfile
@@ -287,19 +264,18 @@ steps:
- DOMAIN=${{DOMAIN}}
- NOCACHE=${{CF_BUILD_ID}}
- REGISTRY=${{REGISTRY}}/cloudharness/
- - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}}
- image_name: cloudharness/workflows-send-result-event
- title: Workflows send result event
- working_directory: ./applications/workflows/tasks/send-result-event
- tag: '${{WORKFLOWS_SEND_RESULT_EVENT_TAG}}'
+ image_name: cloudharness/jupyterhub-jupyterhub-singleuser
+ title: Jupyterhub jupyterhub singleuser
+ working_directory: ./applications/jupyterhub/src/jupyterhub/singleuser
+ tag: '${{JUPYTERHUB_JUPYTERHUB_SINGLEUSER_TAG}}'
when:
condition:
any:
- buildDoesNotExist: includes('${{WORKFLOWS_SEND_RESULT_EVENT_TAG_EXISTS}}',
- '{{WORKFLOWS_SEND_RESULT_EVENT_TAG_EXISTS}}') == true
- forceNoCache: includes('${{WORKFLOWS_SEND_RESULT_EVENT_TAG_FORCE_BUILD}}',
- '{{WORKFLOWS_SEND_RESULT_EVENT_TAG_FORCE_BUILD}}') == false
- workflows-extract-download:
+ buildDoesNotExist: includes('${{JUPYTERHUB_JUPYTERHUB_SINGLEUSER_TAG_EXISTS}}',
+ '{{JUPYTERHUB_JUPYTERHUB_SINGLEUSER_TAG_EXISTS}}') == true
+ forceNoCache: includes('${{JUPYTERHUB_JUPYTERHUB_SINGLEUSER_TAG_FORCE_BUILD}}',
+ '{{JUPYTERHUB_JUPYTERHUB_SINGLEUSER_TAG_FORCE_BUILD}}') == false
+ jupyterhub-jupyterhub-examples-service-fastapi:
type: build
stage: build
dockerfile: Dockerfile
@@ -309,18 +285,20 @@ steps:
- DOMAIN=${{DOMAIN}}
- NOCACHE=${{CF_BUILD_ID}}
- REGISTRY=${{REGISTRY}}/cloudharness/
- image_name: cloudharness/workflows-extract-download
- title: Workflows extract download
- working_directory: ./applications/workflows/tasks/extract-download
- tag: '${{WORKFLOWS_EXTRACT_DOWNLOAD_TAG}}'
+ image_name: cloudharness/jupyterhub-jupyterhub-examples-service-fastapi
+ title: Jupyterhub jupyterhub examples service fastapi
+ working_directory: ./applications/jupyterhub/src/jupyterhub/examples/service-fastapi
+ tag: '${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_SERVICE_FASTAPI_TAG}}'
when:
condition:
any:
- buildDoesNotExist: includes('${{WORKFLOWS_EXTRACT_DOWNLOAD_TAG_EXISTS}}',
- '{{WORKFLOWS_EXTRACT_DOWNLOAD_TAG_EXISTS}}') == true
- forceNoCache: includes('${{WORKFLOWS_EXTRACT_DOWNLOAD_TAG_FORCE_BUILD}}',
- '{{WORKFLOWS_EXTRACT_DOWNLOAD_TAG_FORCE_BUILD}}') == false
- workflows-notify-queue:
+ buildDoesNotExist: includes('${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_SERVICE_FASTAPI_TAG_EXISTS}}',
+ '{{JUPYTERHUB_JUPYTERHUB_EXAMPLES_SERVICE_FASTAPI_TAG_EXISTS}}') ==
+ true
+ forceNoCache: includes('${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_SERVICE_FASTAPI_TAG_FORCE_BUILD}}',
+ '{{JUPYTERHUB_JUPYTERHUB_EXAMPLES_SERVICE_FASTAPI_TAG_FORCE_BUILD}}')
+ == false
+ jupyterhub-jupyterhub-examples-postgres-db:
type: build
stage: build
dockerfile: Dockerfile
@@ -330,19 +308,19 @@ steps:
- DOMAIN=${{DOMAIN}}
- NOCACHE=${{CF_BUILD_ID}}
- REGISTRY=${{REGISTRY}}/cloudharness/
- - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}}
- image_name: cloudharness/workflows-notify-queue
- title: Workflows notify queue
- working_directory: ./applications/workflows/tasks/notify-queue
- tag: '${{WORKFLOWS_NOTIFY_QUEUE_TAG}}'
+ image_name: cloudharness/jupyterhub-jupyterhub-examples-postgres-db
+ title: Jupyterhub jupyterhub examples postgres db
+ working_directory: ./applications/jupyterhub/src/jupyterhub/examples/postgres/db
+ tag: '${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_DB_TAG}}'
when:
condition:
any:
- buildDoesNotExist: includes('${{WORKFLOWS_NOTIFY_QUEUE_TAG_EXISTS}}',
- '{{WORKFLOWS_NOTIFY_QUEUE_TAG_EXISTS}}') == true
- forceNoCache: includes('${{WORKFLOWS_NOTIFY_QUEUE_TAG_FORCE_BUILD}}',
- '{{WORKFLOWS_NOTIFY_QUEUE_TAG_FORCE_BUILD}}') == false
- workflows:
+ buildDoesNotExist: includes('${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_DB_TAG_EXISTS}}',
+ '{{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_DB_TAG_EXISTS}}') == true
+ forceNoCache: includes('${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_DB_TAG_FORCE_BUILD}}',
+ '{{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_DB_TAG_FORCE_BUILD}}')
+ == false
+ jupyterhub-jupyterhub-examples-postgres-hub:
type: build
stage: build
dockerfile: Dockerfile
@@ -352,50 +330,19 @@ steps:
- DOMAIN=${{DOMAIN}}
- NOCACHE=${{CF_BUILD_ID}}
- REGISTRY=${{REGISTRY}}/cloudharness/
- - CLOUDHARNESS_FLASK=${{REGISTRY}}/cloudharness/cloudharness-flask:${{CLOUDHARNESS_FLASK_TAG}}
- image_name: cloudharness/workflows
- title: Workflows
- working_directory: ./applications/workflows/server
- tag: '${{WORKFLOWS_TAG}}'
+ image_name: cloudharness/jupyterhub-jupyterhub-examples-postgres-hub
+ title: Jupyterhub jupyterhub examples postgres hub
+ working_directory: ./applications/jupyterhub/src/jupyterhub/examples/postgres/hub
+ tag: '${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_HUB_TAG}}'
when:
condition:
any:
- buildDoesNotExist: includes('${{WORKFLOWS_TAG_EXISTS}}', '{{WORKFLOWS_TAG_EXISTS}}')
- == true
- forceNoCache: includes('${{WORKFLOWS_TAG_FORCE_BUILD}}', '{{WORKFLOWS_TAG_FORCE_BUILD}}')
+ buildDoesNotExist: includes('${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_HUB_TAG_EXISTS}}',
+ '{{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_HUB_TAG_EXISTS}}') == true
+ forceNoCache: includes('${{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_HUB_TAG_FORCE_BUILD}}',
+ '{{JUPYTERHUB_JUPYTERHUB_EXAMPLES_POSTGRES_HUB_TAG_FORCE_BUILD}}')
== false
- tests_unit:
- stage: unittest
- type: parallel
- steps:
- samples_ut:
- title: Unit tests for samples
- commands:
- - pytest /usr/src/app/samples/test
- image: '${{REGISTRY}}/cloudharness/samples:${{SAMPLES_TAG}}'
- deployment:
- stage: deploy
- type: helm
- working_directory: ./${{CF_REPO_NAME}}
- title: Installing chart
- arguments:
- helm_version: 3.6.2
- chart_name: deployment/helm
- release_name: test-${{NAMESPACE_BASENAME}}-${{CF_SHORT_REVISION}}
- kube_context: '${{CLUSTER_NAME}}'
- namespace: test-${{NAMESPACE_BASENAME}}-${{CF_SHORT_REVISION}}
- chart_version: '${{CF_BUILD_ID}}'
- cmd_ps: --timeout 600s --create-namespace
- custom_value_files:
- - ./deployment/helm/values.yaml
- custom_values:
- - apps_samples_harness_secrets_asecret=${{ASECRET}}
- build_test_images:
- title: Build test images
- type: parallel
- stage: qa
- steps:
- test-e2e:
+ jupyterhub-jupyterhub-demo-image:
type: build
stage: build
dockerfile: Dockerfile
@@ -405,128 +352,58 @@ steps:
- DOMAIN=${{DOMAIN}}
- NOCACHE=${{CF_BUILD_ID}}
- REGISTRY=${{REGISTRY}}/cloudharness/
- image_name: cloudharness/test-e2e
- title: Test e2e
- working_directory: ./test/test-e2e
- tag: '${{TEST_E2E_TAG}}'
+ image_name: cloudharness/jupyterhub-jupyterhub-demo-image
+ title: Jupyterhub jupyterhub demo image
+ working_directory: ./applications/jupyterhub/src/jupyterhub/demo-image
+ tag: '${{JUPYTERHUB_JUPYTERHUB_DEMO_IMAGE_TAG}}'
when:
condition:
any:
- buildDoesNotExist: includes('${{TEST_E2E_TAG_EXISTS}}', '{{TEST_E2E_TAG_EXISTS}}')
- == true
- forceNoCache: includes('${{TEST_E2E_TAG_FORCE_BUILD}}', '{{TEST_E2E_TAG_FORCE_BUILD}}')
- == false
- test-api:
+ buildDoesNotExist: includes('${{JUPYTERHUB_JUPYTERHUB_DEMO_IMAGE_TAG_EXISTS}}',
+ '{{JUPYTERHUB_JUPYTERHUB_DEMO_IMAGE_TAG_EXISTS}}') == true
+ forceNoCache: includes('${{JUPYTERHUB_JUPYTERHUB_DEMO_IMAGE_TAG_FORCE_BUILD}}',
+ '{{JUPYTERHUB_JUPYTERHUB_DEMO_IMAGE_TAG_FORCE_BUILD}}') == false
+ jupyterhub-jupyterhub-onbuild:
type: build
stage: build
- dockerfile: test/test-api/Dockerfile
+ dockerfile: Dockerfile
registry: '${{CODEFRESH_REGISTRY}}'
buildkit: true
build_arguments:
- DOMAIN=${{DOMAIN}}
- NOCACHE=${{CF_BUILD_ID}}
- REGISTRY=${{REGISTRY}}/cloudharness/
- - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}}
- image_name: cloudharness/test-api
- title: Test api
- working_directory: ./.
- tag: '${{TEST_API_TAG}}'
+ image_name: cloudharness/jupyterhub-jupyterhub-onbuild
+ title: Jupyterhub jupyterhub onbuild
+ working_directory: ./applications/jupyterhub/src/jupyterhub/onbuild
+ tag: '${{JUPYTERHUB_JUPYTERHUB_ONBUILD_TAG}}'
when:
condition:
any:
- buildDoesNotExist: includes('${{TEST_API_TAG_EXISTS}}', '{{TEST_API_TAG_EXISTS}}')
- == true
- forceNoCache: includes('${{TEST_API_TAG_FORCE_BUILD}}', '{{TEST_API_TAG_FORCE_BUILD}}')
- == false
- wait_deployment:
- stage: qa
- title: Wait deployment to be ready
- image: codefresh/kubectl
- commands:
- - kubectl config use-context ${{CLUSTER_NAME}}
- - kubectl config set-context --current --namespace=test-${{NAMESPACE_BASENAME}}-${{CF_SHORT_REVISION}}
- - kubectl rollout status deployment/accounts
- - kubectl rollout status deployment/samples
- - kubectl rollout status deployment/common
- - kubectl rollout status deployment/workflows
- - sleep 60
- tests_api:
- stage: qa
- title: Api tests
- working_directory: /home/test
- image: '${{REGISTRY}}/cloudharness/test-api:${{TEST_API_TAG}}'
- fail_fast: false
- commands:
- - echo $APP_NAME
- scale:
- samples_api_test:
- title: samples api test
- volumes:
- - '${{CF_REPO_NAME}}/applications/samples:/home/test'
- - '${{CF_REPO_NAME}}/deployment/helm/values.yaml:/opt/cloudharness/resources/allvalues.yaml'
- environment:
- - APP_URL=https://samples.${{CF_SHORT_REVISION}}.${{DOMAIN}}/api
- - USERNAME=sample@testuser.com
- - PASSWORD=test
- commands:
- - st --pre-run cloudharness_test.apitest_init run api/openapi.yaml --base-url
- https://samples.${{CF_SHORT_REVISION}}.${{DOMAIN}}/api -c all --skip-deprecated-operations
- --hypothesis-suppress-health-check=too_slow --hypothesis-deadline=180000
- --request-timeout=180000 --hypothesis-max-examples=2 --show-errors-tracebacks
- - pytest -v test/api
- common_api_test:
- title: common api test
- volumes:
- - '${{CF_REPO_NAME}}/applications/common:/home/test'
- - '${{CF_REPO_NAME}}/deployment/helm/values.yaml:/opt/cloudharness/resources/allvalues.yaml'
- environment:
- - APP_URL=https://common.${{CF_SHORT_REVISION}}.${{DOMAIN}}/api
- commands:
- - st --pre-run cloudharness_test.apitest_init run api/openapi.yaml --base-url
- https://common.${{CF_SHORT_REVISION}}.${{DOMAIN}}/api -c all
- workflows_api_test:
- title: workflows api test
- volumes:
- - '${{CF_REPO_NAME}}/applications/workflows:/home/test'
- - '${{CF_REPO_NAME}}/deployment/helm/values.yaml:/opt/cloudharness/resources/allvalues.yaml'
- environment:
- - APP_URL=https://workflows.${{CF_SHORT_REVISION}}.${{DOMAIN}}/api
- commands:
- - st --pre-run cloudharness_test.apitest_init run api/openapi.yaml --base-url
- https://workflows.${{CF_SHORT_REVISION}}.${{DOMAIN}}/api -c all
- hooks:
- on_fail:
- exec:
- image: alpine
- commands:
- - cf_export FAILED=failed
- tests_e2e:
- stage: qa
- title: End to end tests
- working_directory: /home/test
- image: '${{REGISTRY}}/cloudharness/test-e2e:${{TEST_E2E_TAG}}'
- fail_fast: false
- commands:
- - yarn test
- scale:
- samples_e2e_test:
- title: samples e2e test
- volumes:
- - '${{CF_REPO_NAME}}/applications/samples/test/e2e:/home/test/__tests__/samples'
- environment:
- - APP_URL=https://samples.${{CF_SHORT_REVISION}}.${{DOMAIN}}
- - USERNAME=sample@testuser.com
- - PASSWORD=test
- hooks:
- on_fail:
- exec:
- image: alpine
- commands:
- - cf_export FAILED=failed
+ buildDoesNotExist: includes('${{JUPYTERHUB_JUPYTERHUB_ONBUILD_TAG_EXISTS}}',
+ '{{JUPYTERHUB_JUPYTERHUB_ONBUILD_TAG_EXISTS}}') == true
+ forceNoCache: includes('${{JUPYTERHUB_JUPYTERHUB_ONBUILD_TAG_FORCE_BUILD}}',
+ '{{JUPYTERHUB_JUPYTERHUB_ONBUILD_TAG_FORCE_BUILD}}') == false
+ deployment:
+ stage: deploy
+ type: helm
+ working_directory: ./${{CF_REPO_NAME}}
+ title: Installing chart
+ arguments:
+ helm_version: 3.6.2
+ chart_name: deployment/helm
+ release_name: test-${{NAMESPACE_BASENAME}}
+ kube_context: '${{CLUSTER_NAME}}'
+ namespace: test-${{NAMESPACE_BASENAME}}
+ chart_version: '${{CF_SHORT_REVISION}}'
+ cmd_ps: --timeout 600s --create-namespace
+ custom_value_files:
+ - ./deployment/helm/values.yaml
+ custom_values: []
approval:
type: pending-approval
stage: qa
- title: Approve with failed tests
+ title: Approve anyway and delete deployment
description: The pipeline will fail after ${{WAIT_ON_FAIL}} minutes
timeout:
timeUnit: minutes
@@ -536,21 +413,11 @@ steps:
condition:
all:
error: '"${{FAILED}}" == "failed"'
- wait_on_fail: '${{WAIT_ON_FAIL}}'
- dummy_end:
- title: Dummy step
- description: Without this, the on_finish hook is executed before the approval
- step
- image: python:3.9.10
- stage: qa
- when:
- condition:
- all:
- error: '"${{FAILED}}" == "failed"'
- wait_on_fail: '${{WAIT_ON_FAIL}}'
-hooks:
- on_finish:
+ delete_deployment:
+ title: Delete deployment
+ description: The deployment is deleted at the end of the pipeline
image: codefresh/kubectl
+ stage: qa
commands:
- kubectl config use-context ${{CLUSTER_NAME}}
- - kubectl delete ns test-${{NAMESPACE_BASENAME}}-${{CF_SHORT_REVISION}}
+ - kubectl delete ns test-${{NAMESPACE_BASENAME}}
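A note on the recurring `when.condition` expressions in this pipeline:
Codefresh leaves a `${{VAR}}` reference unexpanded when `VAR` was never
exported, so testing whether the expanded string still `includes` the literal
placeholder reveals whether the variable was set. A condensed sketch of the
pattern, using variables that appear above:

```yaml
when:
  condition:
    any:
      # true when JUPYTERHUB_TAG_EXISTS was never exported, i.e. no prebuilt image was found
      buildDoesNotExist: includes('${{JUPYTERHUB_TAG_EXISTS}}', '{{JUPYTERHUB_TAG_EXISTS}}') == true
      # true when JUPYTERHUB_TAG_FORCE_BUILD was exported to a concrete value
      forceNoCache: includes('${{JUPYTERHUB_TAG_FORCE_BUILD}}', '{{JUPYTERHUB_TAG_FORCE_BUILD}}') == false
```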
From f87869b8462be4bebe18b3eb9adf2c63392eaf1f Mon Sep 17 00:00:00 2001
From: Filippo Ledda
Date: Mon, 22 Jan 2024 12:38:05 +0100
Subject: [PATCH 004/210] CH-110 jupyterhub update OK; add tests
---
.../deploy/resources/hub/jupyterhub_config.py | 6 ++--
.../jupyterhub/deploy/values-test.yaml | 7 +++++
applications/jupyterhub/deploy/values.yaml | 6 ++++
.../harness_jupyter/jupyterhub.py | 2 +-
applications/samples/deploy/values-test.yaml | 8 +++++
deployment/codefresh-test.yaml | 30 +++++++++++++++++++
6 files changed, 55 insertions(+), 4 deletions(-)
create mode 100644 applications/jupyterhub/deploy/values-test.yaml
diff --git a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
index 5ebe20b5..8fdfa8c1 100755
--- a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
+++ b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
@@ -12,8 +12,6 @@
#CLOUDHARNESS: EDIT START
import logging
-from kubernetes import client
-from jupyterhub.utils import url_path_join
try:
from harness_jupyter.jupyterhub import harness_hub
@@ -492,6 +490,7 @@ def camelCaseify(s):
cfg.pop("keys", None)
c[app].update(cfg)
+
# load /usr/local/etc/jupyterhub/jupyterhub_config.d config files
config_dir = "/usr/local/etc/jupyterhub/jupyterhub_config.d"
if os.path.isdir(config_dir):
@@ -560,4 +559,5 @@ def camelCaseify(s):
c.registry = get_config('registry')
c.domain = get_config('root.domain')
c.namespace = get_config('root.namespace')
-# CLOUDHARNESS: EDIT END
\ No newline at end of file
+# CLOUDHARNESS: EDIT END
+
\ No newline at end of file
diff --git a/applications/jupyterhub/deploy/values-test.yaml b/applications/jupyterhub/deploy/values-test.yaml
new file mode 100644
index 00000000..3ca312d3
--- /dev/null
+++ b/applications/jupyterhub/deploy/values-test.yaml
@@ -0,0 +1,7 @@
+harness:
+ accounts:
+ users:
+ - username: samplehub@testuser.com
+ realmRoles:
+ - offline_access
+
diff --git a/applications/jupyterhub/deploy/values.yaml b/applications/jupyterhub/deploy/values.yaml
index 41e108d6..b871b33b 100755
--- a/applications/jupyterhub/deploy/values.yaml
+++ b/applications/jupyterhub/deploy/values.yaml
@@ -25,6 +25,12 @@ harness: # EDIT: CLOUDHARNESS
quota-ws-maxmem: 0.5
# sets the storage dedicated to the user data in Gb units (float)
quota-storage-max: 1.25
+ test:
+ e2e:
+ enabled: true
+ smoketest: true
+ ignoreRequestErrors: false
+ ignoreConsoleErrors: false
# fullnameOverride and nameOverride distinguishes blank strings, null values,
# and non-blank strings. For more details, see the configuration reference.
diff --git a/applications/jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py b/applications/jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py
index 220883a8..fc4d0dd0 100644
--- a/applications/jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py
+++ b/applications/jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py
@@ -139,7 +139,7 @@ def change_pod_manifest(self: KubeSpawner):
if 'subdomain' in harness and harness['subdomain'] == subdomain:
ws_image = getattr(self, "ws_image", None)
- logging.info("Subdomain is", subdomain)
+ logging.info("Subdomain is %s", subdomain)
if ws_image:
# try getting the image + tag from values.yaml
ch_conf = conf.get_configuration()
diff --git a/applications/samples/deploy/values-test.yaml b/applications/samples/deploy/values-test.yaml
index 3555108f..14274fd6 100644
--- a/applications/samples/deploy/values-test.yaml
+++ b/applications/samples/deploy/values-test.yaml
@@ -1,4 +1,12 @@
harness:
+ dependencies:
+ soft:
+ - workflows
+ - events
+ - accounts
+ - common
+ - nfsserver
+ - jupyterhub
accounts:
roles:
- role1
diff --git a/deployment/codefresh-test.yaml b/deployment/codefresh-test.yaml
index ff7a88af..7280c8e6 100644
--- a/deployment/codefresh-test.yaml
+++ b/deployment/codefresh-test.yaml
@@ -165,6 +165,28 @@ steps:
== true
forceNoCache: includes('${{ACCOUNTS_TAG_FORCE_BUILD}}', '{{ACCOUNTS_TAG_FORCE_BUILD}}')
== false
+ jupyterhub:
+ type: build
+ stage: build
+ dockerfile: Dockerfile
+ registry: '${{CODEFRESH_REGISTRY}}'
+ buildkit: true
+ build_arguments:
+ - DOMAIN=${{DOMAIN}}
+ - NOCACHE=${{CF_BUILD_ID}}
+ - REGISTRY=${{REGISTRY}}/cloudharness/
+ - CLOUDHARNESS_BASE=${{REGISTRY}}/cloudharness/cloudharness-base:${{CLOUDHARNESS_BASE_TAG}}
+ image_name: cloudharness/jupyterhub
+ title: Jupyterhub
+ working_directory: ./applications/jupyterhub
+ tag: '${{JUPYTERHUB_TAG}}'
+ when:
+ condition:
+ any:
+ buildDoesNotExist: includes('${{JUPYTERHUB_TAG_EXISTS}}', '{{JUPYTERHUB_TAG_EXISTS}}')
+ == true
+ forceNoCache: includes('${{JUPYTERHUB_TAG_FORCE_BUILD}}', '{{JUPYTERHUB_TAG_FORCE_BUILD}}')
+ == false
samples:
type: build
stage: build
@@ -510,6 +532,14 @@ steps:
commands:
- yarn test
scale:
+ jupyterhub_e2e_test:
+ title: jupyterhub e2e test
+ volumes:
+ - '${{CF_REPO_NAME}}/applications/jupyterhub/test/e2e:/home/test/__tests__/jupyterhub'
+ environment:
+ - APP_URL=https://hub.${{DOMAIN}}
+ - USERNAME=samplehub@testuser.com
+ - PASSWORD=test
samples_e2e_test:
title: samples e2e test
volumes:
From 9c905eb17cc78d39c5fc560bc5f250ab649ec7ba Mon Sep 17 00:00:00 2001
From: Filippo Ledda
Date: Mon, 22 Jan 2024 15:13:19 +0100
Subject: [PATCH 005/210] CH-110 fix test
---
applications/jupyterhub/deploy/values-test.yaml | 6 ------
deployment/codefresh-test.yaml | 4 +++-
2 files changed, 3 insertions(+), 7 deletions(-)
diff --git a/applications/jupyterhub/deploy/values-test.yaml b/applications/jupyterhub/deploy/values-test.yaml
index 3ca312d3..8b137891 100644
--- a/applications/jupyterhub/deploy/values-test.yaml
+++ b/applications/jupyterhub/deploy/values-test.yaml
@@ -1,7 +1 @@
-harness:
- accounts:
- users:
- - username: samplehub@testuser.com
- realmRoles:
- - offline_access
diff --git a/deployment/codefresh-test.yaml b/deployment/codefresh-test.yaml
index bbedd788..d15db5cd 100644
--- a/deployment/codefresh-test.yaml
+++ b/deployment/codefresh-test.yaml
@@ -467,7 +467,9 @@ steps:
- kubectl config use-context ${{CLUSTER_NAME}}
- kubectl config set-context --current --namespace=test-${{NAMESPACE_BASENAME}}
- kubectl rollout status deployment/accounts
+ - kubectl rollout status deployment/argo-server-gk
- kubectl rollout status deployment/samples
+ - kubectl rollout status deployment/samples-gk
- kubectl rollout status deployment/common
- kubectl rollout status deployment/workflows
- sleep 60
@@ -536,7 +538,7 @@ steps:
- '${{CF_REPO_NAME}}/applications/jupyterhub/test/e2e:/home/test/__tests__/jupyterhub'
environment:
- APP_URL=https://hub.${{DOMAIN}}
- - USERNAME=samplehub@testuser.com
+ - USERNAME=sample@testuser.com
- PASSWORD=test
samples_e2e_test:
title: samples e2e test
From 313b9e47491c5c2c1a8795dab01147893245dc8d Mon Sep 17 00:00:00 2001
From: Filippo Ledda
Date: Mon, 22 Jan 2024 16:52:10 +0100
Subject: [PATCH 006/210] CH-110 Disable hub network policy
---
applications/jupyterhub/deploy/values.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/applications/jupyterhub/deploy/values.yaml b/applications/jupyterhub/deploy/values.yaml
index b871b33b..9be3ad5d 100755
--- a/applications/jupyterhub/deploy/values.yaml
+++ b/applications/jupyterhub/deploy/values.yaml
@@ -136,7 +136,7 @@ hub:
maxUnavailable:
minAvailable: 1
networkPolicy:
- enabled: true
+ enabled: false # EDIT: CLOUDHARNESS -- cannot connect to accounts otherwise
ingress: []
egress: []
egressAllowRules:
From 24132a5d52e806d21cfabad02345f6140ecfebf6 Mon Sep 17 00:00:00 2001
From: Filippo Ledda
Date: Tue, 23 Jan 2024 16:38:26 +0100
Subject: [PATCH 007/210] CH-94 add control on image prepull
---
.../image-puller/_helpers-daemonset.tpl | 20 ++++++++++++
.../jupyterhub/deploy/values-test.yaml | 6 +++-
docs/jupyterhub.md | 31 ++++++++++++++++++-
3 files changed, 55 insertions(+), 2 deletions(-)
diff --git a/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl b/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
index 528345c0..f872a336 100644
--- a/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
+++ b/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
@@ -199,7 +199,27 @@ spec:
securityContext:
{{- . | toYaml | nindent 12 }}
{{- end }}
+ {{- end }}
+ {{- /* --- EDIT: CLOUDHARNESS pull images --- */}}
+ {{- if $.Values.apps.jupyterhub.harness.jupyterhub.prepull -}}
+ {{- range $k, $v := $.Values.apps.jupyterhub.harness.jupyterhub.prepull }}
+ - name: image-pull--{{ $v }}
+ image: {{ get ( get $.Values "task-images" ) $v }}
+ command:
+ - /bin/sh
+ - -c
+ - echo "Pulling complete"
+ {{- with $.Values.apps.jupyterhub.prePuller.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with $.Values.apps.jupyterhub.prePuller.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
{{- end }}
+ {{- end }}
+ {{- /* --- END EDIT: CLOUDHARNESS pull images --- */}}
containers:
- name: pause
image: {{ .Values.apps.jupyterhub.prePuller.pause.image.name }}:{{ .Values.apps.jupyterhub.prePuller.pause.image.tag }}
diff --git a/applications/jupyterhub/deploy/values-test.yaml b/applications/jupyterhub/deploy/values-test.yaml
index 8b137891..653cae1e 100644
--- a/applications/jupyterhub/deploy/values-test.yaml
+++ b/applications/jupyterhub/deploy/values-test.yaml
@@ -1 +1,5 @@
-
+harness:
+ jupyterhub:
+ prepull:
+ - cloudharness-base
+
diff --git a/docs/jupyterhub.md b/docs/jupyterhub.md
index 709ede5f..3d7046bc 100644
--- a/docs/jupyterhub.md
+++ b/docs/jupyterhub.md
@@ -35,6 +35,7 @@ Edit the `deploy/values.yaml` file `harness.jupyterhub` section to edit configu
- `applicationHook`: change the hook function (advanced, see below)
- `extraConfig`: allows you to add Python snippets to the jupyterhub_config.py file
- `spawnerExtraConfig`: allows you to add values to the spawner object without the need of creating a new hook
+- `prepull`: indicates the images to prepull from the current build
Example:
```yaml
@@ -46,6 +47,8 @@ harness:
name: proxy-public
jupyterhub:
args: ["--debug", "--NotebookApp.default_url=/lab"]
+ prepull:
+ - cloudharness-base
extraConfig:
timing: |
c.Spawner.port = 8000
@@ -179,4 +182,30 @@ Cloudharness JupyterHub is integrated with the accounts service so enabling a sh
The spawner is also adapted providing a hook to allow other applications to be based on the hub spawner to run with their own configurations.
-Available
\ No newline at end of file
+Available
+
+## Prepull configuration
+Image prepull can be configured in two ways.
+
+For static images (with a known tag), you can set `prePuller.extraImages` in `applications/jupyterhub/deploy/values.yaml`, like:
+
+```yaml
+prePuller:
+ extraImages:
+ nginx-image:
+ name: nginx
+ tag: latest
+```
+
+For images whose build is managed by CloudHarness, the tag is not known at configuration time;
+in this case, you can rely on the dynamic configuration through the `harness.jupyterhub.prepull` variable, like:
+
+```yaml
+harness:
+ jupyterhub:
+ prepull:
+ - cloudharness-base
+```
+
+> Note that only built images defined as task, base, or common images can be used here.
+> If an image is not included, you may need to add it as a build dependency or, better, define the task images directly inside your jupyterhub application override.
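+
+For reference, each `prepull` entry is rendered by the image-puller daemonset
+template into an init container roughly like the following sketch (registry and
+tag are illustrative; the actual image reference is resolved from the generated
+`task-images` values):
+
+```yaml
+initContainers:
+  - name: image-pull--cloudharness-base
+    image: localhost:5000/cloudharness/cloudharness-base:latest
+    command:
+      - /bin/sh
+      - -c
+      - echo "Pulling complete"
+```
+
+The container exits immediately; pulling the image onto the node is its only purpose.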
From 99958b7dbd8ab21ab42addf16909653fc365ec46 Mon Sep 17 00:00:00 2001
From: Zoran Sinnema
Date: Mon, 29 Jan 2024 10:57:13 +0100
Subject: [PATCH 008/210] chore: save jupyterhub profile list in self
---
.../jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/applications/jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py b/applications/jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py
index fc4d0dd0..ac7dafa6 100644
--- a/applications/jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py
+++ b/applications/jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py
@@ -18,6 +18,7 @@
def custom_options_form(spawner, abc):
# let's skip the profile selection form for now
# ToDo: for future we can remove this hook
+ spawner._ch_profile_list = spawner.profile_list
spawner.profile_list = []
# ref: https://github.com/jupyterhub/kubespawner/blob/37a80abb0a6c826e5c118a068fa1cf2725738038/kubespawner/spawner.py#L1885-L1935
return spawner._options_form_default()
From 7126e57da27642aa5db6148692212443e23ed5cf Mon Sep 17 00:00:00 2001
From: Filippo Ledda
Date: Tue, 30 Jan 2024 13:34:57 +0100
Subject: [PATCH 009/210] CH-110 fix custom prepull issue
---
.../deploy/templates/image-puller/_helpers-daemonset.tpl | 4 ++--
applications/jupyterhub/deploy/values.yaml | 2 ++
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl b/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
index f872a336..04fb18a3 100644
--- a/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
+++ b/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
@@ -201,8 +201,8 @@ spec:
{{- end }}
{{- end }}
{{- /* --- EDIT: CLOUDHARNESS pull images --- */}}
- {{- if $.Values.apps.jupyterhub.harness.jupyterhub.prepull -}}
- {{- range $k, $v := $.Values.apps.jupyterhub.harness.jupyterhub.prepull }}
+ {{- if $.Values.apps.jupyterhub.harness.dependencies.prepull -}}
+ {{- range $k, $v := $.Values.apps.jupyterhub.harness.dependencies.prepull }}
- name: image-pull--{{ $v }}
image: {{ get ( get $.Values "task-images" ) $v }}
command:
diff --git a/applications/jupyterhub/deploy/values.yaml b/applications/jupyterhub/deploy/values.yaml
index 9be3ad5d..5acc7928 100755
--- a/applications/jupyterhub/deploy/values.yaml
+++ b/applications/jupyterhub/deploy/values.yaml
@@ -12,6 +12,7 @@ harness: # EDIT: CLOUDHARNESS
- accounts
build:
- cloudharness-base
+ prepull: [] # additional images to add to the prepuller
quotas:
# sets the maximum number of (included named) servers open concurrently (int)
quota-ws-open: 3
@@ -31,6 +32,7 @@ harness: # EDIT: CLOUDHARNESS
smoketest: true
ignoreRequestErrors: false
ignoreConsoleErrors: false
+
# fullnameOverride and nameOverride distinguishes blank strings, null values,
# and non-blank strings. For more details, see the configuration reference.
From d25230ff5144cfead4bf98eb343ddd684dfa654a Mon Sep 17 00:00:00 2001
From: aranega
Date: Tue, 6 Feb 2024 13:23:28 -0600
Subject: [PATCH 010/210] CH-100 Add first code to call a dedicated
docker-compose generation
---
.../deploy/templates/{ => helm}/argo-sa.yaml | 0
.../templates/{ => helm}/broker-config.yml | 0
.../templates/{ => helm}/configmap.yaml | 0
.../templates/{ => helm}/deployments.yml | 0
.../deploy/templates/{ => helm}/roles.yml | 0
.../deploy/templates/{ => helm}/services.yml | 0
.../templates/{ => helm}/zoo-config.yml | 0
.../{ => helm}/_helpers-auth-rework.tpl | 0
.../templates/{ => helm}/_helpers-names.tpl | 0
.../deploy/templates/{ => helm}/_helpers.tpl | 0
.../{ => helm}/hub/_helpers-passwords.tpl | 0
.../templates/{ => helm}/hub/configmap.yaml | 0
.../templates/{ => helm}/hub/deployment.yaml | 0
.../templates/{ => helm}/hub/netpol.yaml | 0
.../deploy/templates/{ => helm}/hub/pdb.yaml | 0
.../deploy/templates/{ => helm}/hub/pvc.yaml | 0
.../deploy/templates/{ => helm}/hub/rbac.yaml | 0
.../templates/{ => helm}/hub/secret.yaml | 0
.../templates/{ => helm}/hub/service.yaml | 0
.../image-puller/_helpers-daemonset.tpl | 0
.../image-puller/daemonset-continuous.yaml | 0
.../image-puller/daemonset-hook.yaml | 0
.../{ => helm}/image-puller/job.yaml | 0
.../{ => helm}/image-puller/rbac.yaml | 0
.../{ => helm}/proxy/autohttps/_README.txt | 0
.../{ => helm}/proxy/autohttps/configmap.yaml | 0
.../proxy/autohttps/deployment.yaml | 0
.../{ => helm}/proxy/autohttps/rbac.yaml | 0
.../{ => helm}/proxy/autohttps/service.yaml | 0
.../{ => helm}/proxy/deployment.yaml | 0
.../templates/{ => helm}/proxy/netpol.yaml | 0
.../templates/{ => helm}/proxy/pdb.yaml | 0
.../templates/{ => helm}/proxy/secret.yaml | 0
.../templates/{ => helm}/proxy/service.yaml | 0
.../scheduling/_scheduling-helpers.tpl | 0
.../{ => helm}/scheduling/priorityclass.yaml | 0
.../scheduling/user-placeholder/pdb.yaml | 0
.../user-placeholder/priorityclass.yaml | 0
.../user-placeholder/statefulset.yaml | 0
.../scheduling/user-scheduler/configmap.yaml | 0
.../scheduling/user-scheduler/deployment.yaml | 0
.../scheduling/user-scheduler/pdb.yaml | 0
.../scheduling/user-scheduler/rbac.yaml | 0
.../{ => helm}/singleuser/netpol.yaml | 0
.../deploy/templates/{ => helm}/_helpers.tpl | 0
.../templates/{ => helm}/clusterrole.yaml | 0
.../{ => helm}/clusterrolebinding.yaml | 0
.../templates/{ => helm}/nfs-server.yaml | 0
.../{ => helm}/podsecuritypolicy.yaml | 0
.../deploy/templates/{ => helm}/role.yaml | 0
.../templates/{ => helm}/rolebinding.yaml | 0
.../templates/{ => helm}/serviceaccount.yaml | 0
.../templates/{ => helm}/storageclass.yaml | 0
.../deploy/templates/{ => helm}/redis.yaml | 0
deployment-configuration/compose/.helmignore | 22 +
deployment-configuration/compose/Chart.yaml | 10 +
deployment-configuration/compose/README.md | 4 +
.../compose/templates/auto-compose.yaml | 103 +++
deployment-configuration/compose/values.yaml | 79 ++
.../ch_cli_tools/dockercompose.py | 753 ++++++++++++++++++
.../deployment-cli-tools/ch_cli_tools/helm.py | 10 +-
tools/deployment-cli-tools/harness-deployment | 41 +-
62 files changed, 1015 insertions(+), 7 deletions(-)
rename applications/argo/deploy/templates/{ => helm}/argo-sa.yaml (100%)
rename applications/events/deploy/templates/{ => helm}/broker-config.yml (100%)
rename applications/events/deploy/templates/{ => helm}/configmap.yaml (100%)
rename applications/events/deploy/templates/{ => helm}/deployments.yml (100%)
rename applications/events/deploy/templates/{ => helm}/roles.yml (100%)
rename applications/events/deploy/templates/{ => helm}/services.yml (100%)
rename applications/events/deploy/templates/{ => helm}/zoo-config.yml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/_helpers-auth-rework.tpl (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/_helpers-names.tpl (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/_helpers.tpl (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/hub/_helpers-passwords.tpl (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/hub/configmap.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/hub/deployment.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/hub/netpol.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/hub/pdb.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/hub/pvc.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/hub/rbac.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/hub/secret.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/hub/service.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/image-puller/_helpers-daemonset.tpl (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/image-puller/daemonset-continuous.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/image-puller/daemonset-hook.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/image-puller/job.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/image-puller/rbac.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/proxy/autohttps/_README.txt (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/proxy/autohttps/configmap.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/proxy/autohttps/deployment.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/proxy/autohttps/rbac.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/proxy/autohttps/service.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/proxy/deployment.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/proxy/netpol.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/proxy/pdb.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/proxy/secret.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/proxy/service.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/scheduling/_scheduling-helpers.tpl (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/scheduling/priorityclass.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/scheduling/user-placeholder/pdb.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/scheduling/user-placeholder/priorityclass.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/scheduling/user-placeholder/statefulset.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/scheduling/user-scheduler/configmap.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/scheduling/user-scheduler/deployment.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/scheduling/user-scheduler/pdb.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/scheduling/user-scheduler/rbac.yaml (100%)
rename applications/jupyterhub/deploy/templates/{ => helm}/singleuser/netpol.yaml (100%)
rename applications/nfsserver/deploy/templates/{ => helm}/_helpers.tpl (100%)
rename applications/nfsserver/deploy/templates/{ => helm}/clusterrole.yaml (100%)
rename applications/nfsserver/deploy/templates/{ => helm}/clusterrolebinding.yaml (100%)
rename applications/nfsserver/deploy/templates/{ => helm}/nfs-server.yaml (100%)
rename applications/nfsserver/deploy/templates/{ => helm}/podsecuritypolicy.yaml (100%)
rename applications/nfsserver/deploy/templates/{ => helm}/role.yaml (100%)
rename applications/nfsserver/deploy/templates/{ => helm}/rolebinding.yaml (100%)
rename applications/nfsserver/deploy/templates/{ => helm}/serviceaccount.yaml (100%)
rename applications/nfsserver/deploy/templates/{ => helm}/storageclass.yaml (100%)
rename applications/sentry/deploy/templates/{ => helm}/redis.yaml (100%)
create mode 100644 deployment-configuration/compose/.helmignore
create mode 100644 deployment-configuration/compose/Chart.yaml
create mode 100644 deployment-configuration/compose/README.md
create mode 100644 deployment-configuration/compose/templates/auto-compose.yaml
create mode 100644 deployment-configuration/compose/values.yaml
create mode 100644 tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
diff --git a/applications/argo/deploy/templates/argo-sa.yaml b/applications/argo/deploy/templates/helm/argo-sa.yaml
similarity index 100%
rename from applications/argo/deploy/templates/argo-sa.yaml
rename to applications/argo/deploy/templates/helm/argo-sa.yaml
diff --git a/applications/events/deploy/templates/broker-config.yml b/applications/events/deploy/templates/helm/broker-config.yml
similarity index 100%
rename from applications/events/deploy/templates/broker-config.yml
rename to applications/events/deploy/templates/helm/broker-config.yml
diff --git a/applications/events/deploy/templates/configmap.yaml b/applications/events/deploy/templates/helm/configmap.yaml
similarity index 100%
rename from applications/events/deploy/templates/configmap.yaml
rename to applications/events/deploy/templates/helm/configmap.yaml
diff --git a/applications/events/deploy/templates/deployments.yml b/applications/events/deploy/templates/helm/deployments.yml
similarity index 100%
rename from applications/events/deploy/templates/deployments.yml
rename to applications/events/deploy/templates/helm/deployments.yml
diff --git a/applications/events/deploy/templates/roles.yml b/applications/events/deploy/templates/helm/roles.yml
similarity index 100%
rename from applications/events/deploy/templates/roles.yml
rename to applications/events/deploy/templates/helm/roles.yml
diff --git a/applications/events/deploy/templates/services.yml b/applications/events/deploy/templates/helm/services.yml
similarity index 100%
rename from applications/events/deploy/templates/services.yml
rename to applications/events/deploy/templates/helm/services.yml
diff --git a/applications/events/deploy/templates/zoo-config.yml b/applications/events/deploy/templates/helm/zoo-config.yml
similarity index 100%
rename from applications/events/deploy/templates/zoo-config.yml
rename to applications/events/deploy/templates/helm/zoo-config.yml
diff --git a/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl b/applications/jupyterhub/deploy/templates/helm/_helpers-auth-rework.tpl
similarity index 100%
rename from applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
rename to applications/jupyterhub/deploy/templates/helm/_helpers-auth-rework.tpl
diff --git a/applications/jupyterhub/deploy/templates/_helpers-names.tpl b/applications/jupyterhub/deploy/templates/helm/_helpers-names.tpl
similarity index 100%
rename from applications/jupyterhub/deploy/templates/_helpers-names.tpl
rename to applications/jupyterhub/deploy/templates/helm/_helpers-names.tpl
diff --git a/applications/jupyterhub/deploy/templates/_helpers.tpl b/applications/jupyterhub/deploy/templates/helm/_helpers.tpl
similarity index 100%
rename from applications/jupyterhub/deploy/templates/_helpers.tpl
rename to applications/jupyterhub/deploy/templates/helm/_helpers.tpl
diff --git a/applications/jupyterhub/deploy/templates/hub/_helpers-passwords.tpl b/applications/jupyterhub/deploy/templates/helm/hub/_helpers-passwords.tpl
similarity index 100%
rename from applications/jupyterhub/deploy/templates/hub/_helpers-passwords.tpl
rename to applications/jupyterhub/deploy/templates/helm/hub/_helpers-passwords.tpl
diff --git a/applications/jupyterhub/deploy/templates/hub/configmap.yaml b/applications/jupyterhub/deploy/templates/helm/hub/configmap.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/hub/configmap.yaml
rename to applications/jupyterhub/deploy/templates/helm/hub/configmap.yaml
diff --git a/applications/jupyterhub/deploy/templates/hub/deployment.yaml b/applications/jupyterhub/deploy/templates/helm/hub/deployment.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/hub/deployment.yaml
rename to applications/jupyterhub/deploy/templates/helm/hub/deployment.yaml
diff --git a/applications/jupyterhub/deploy/templates/hub/netpol.yaml b/applications/jupyterhub/deploy/templates/helm/hub/netpol.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/hub/netpol.yaml
rename to applications/jupyterhub/deploy/templates/helm/hub/netpol.yaml
diff --git a/applications/jupyterhub/deploy/templates/hub/pdb.yaml b/applications/jupyterhub/deploy/templates/helm/hub/pdb.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/hub/pdb.yaml
rename to applications/jupyterhub/deploy/templates/helm/hub/pdb.yaml
diff --git a/applications/jupyterhub/deploy/templates/hub/pvc.yaml b/applications/jupyterhub/deploy/templates/helm/hub/pvc.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/hub/pvc.yaml
rename to applications/jupyterhub/deploy/templates/helm/hub/pvc.yaml
diff --git a/applications/jupyterhub/deploy/templates/hub/rbac.yaml b/applications/jupyterhub/deploy/templates/helm/hub/rbac.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/hub/rbac.yaml
rename to applications/jupyterhub/deploy/templates/helm/hub/rbac.yaml
diff --git a/applications/jupyterhub/deploy/templates/hub/secret.yaml b/applications/jupyterhub/deploy/templates/helm/hub/secret.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/hub/secret.yaml
rename to applications/jupyterhub/deploy/templates/helm/hub/secret.yaml
diff --git a/applications/jupyterhub/deploy/templates/hub/service.yaml b/applications/jupyterhub/deploy/templates/helm/hub/service.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/hub/service.yaml
rename to applications/jupyterhub/deploy/templates/helm/hub/service.yaml
diff --git a/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl b/applications/jupyterhub/deploy/templates/helm/image-puller/_helpers-daemonset.tpl
similarity index 100%
rename from applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
rename to applications/jupyterhub/deploy/templates/helm/image-puller/_helpers-daemonset.tpl
diff --git a/applications/jupyterhub/deploy/templates/image-puller/daemonset-continuous.yaml b/applications/jupyterhub/deploy/templates/helm/image-puller/daemonset-continuous.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/image-puller/daemonset-continuous.yaml
rename to applications/jupyterhub/deploy/templates/helm/image-puller/daemonset-continuous.yaml
diff --git a/applications/jupyterhub/deploy/templates/image-puller/daemonset-hook.yaml b/applications/jupyterhub/deploy/templates/helm/image-puller/daemonset-hook.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/image-puller/daemonset-hook.yaml
rename to applications/jupyterhub/deploy/templates/helm/image-puller/daemonset-hook.yaml
diff --git a/applications/jupyterhub/deploy/templates/image-puller/job.yaml b/applications/jupyterhub/deploy/templates/helm/image-puller/job.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/image-puller/job.yaml
rename to applications/jupyterhub/deploy/templates/helm/image-puller/job.yaml
diff --git a/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml b/applications/jupyterhub/deploy/templates/helm/image-puller/rbac.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
rename to applications/jupyterhub/deploy/templates/helm/image-puller/rbac.yaml
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt b/applications/jupyterhub/deploy/templates/helm/proxy/autohttps/_README.txt
similarity index 100%
rename from applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt
rename to applications/jupyterhub/deploy/templates/helm/proxy/autohttps/_README.txt
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml b/applications/jupyterhub/deploy/templates/helm/proxy/autohttps/configmap.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml
rename to applications/jupyterhub/deploy/templates/helm/proxy/autohttps/configmap.yaml
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml b/applications/jupyterhub/deploy/templates/helm/proxy/autohttps/deployment.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml
rename to applications/jupyterhub/deploy/templates/helm/proxy/autohttps/deployment.yaml
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml b/applications/jupyterhub/deploy/templates/helm/proxy/autohttps/rbac.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml
rename to applications/jupyterhub/deploy/templates/helm/proxy/autohttps/rbac.yaml
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml b/applications/jupyterhub/deploy/templates/helm/proxy/autohttps/service.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml
rename to applications/jupyterhub/deploy/templates/helm/proxy/autohttps/service.yaml
diff --git a/applications/jupyterhub/deploy/templates/proxy/deployment.yaml b/applications/jupyterhub/deploy/templates/helm/proxy/deployment.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/proxy/deployment.yaml
rename to applications/jupyterhub/deploy/templates/helm/proxy/deployment.yaml
diff --git a/applications/jupyterhub/deploy/templates/proxy/netpol.yaml b/applications/jupyterhub/deploy/templates/helm/proxy/netpol.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/proxy/netpol.yaml
rename to applications/jupyterhub/deploy/templates/helm/proxy/netpol.yaml
diff --git a/applications/jupyterhub/deploy/templates/proxy/pdb.yaml b/applications/jupyterhub/deploy/templates/helm/proxy/pdb.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/proxy/pdb.yaml
rename to applications/jupyterhub/deploy/templates/helm/proxy/pdb.yaml
diff --git a/applications/jupyterhub/deploy/templates/proxy/secret.yaml b/applications/jupyterhub/deploy/templates/helm/proxy/secret.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/proxy/secret.yaml
rename to applications/jupyterhub/deploy/templates/helm/proxy/secret.yaml
diff --git a/applications/jupyterhub/deploy/templates/proxy/service.yaml b/applications/jupyterhub/deploy/templates/helm/proxy/service.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/proxy/service.yaml
rename to applications/jupyterhub/deploy/templates/helm/proxy/service.yaml
diff --git a/applications/jupyterhub/deploy/templates/scheduling/_scheduling-helpers.tpl b/applications/jupyterhub/deploy/templates/helm/scheduling/_scheduling-helpers.tpl
similarity index 100%
rename from applications/jupyterhub/deploy/templates/scheduling/_scheduling-helpers.tpl
rename to applications/jupyterhub/deploy/templates/helm/scheduling/_scheduling-helpers.tpl
diff --git a/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml b/applications/jupyterhub/deploy/templates/helm/scheduling/priorityclass.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
rename to applications/jupyterhub/deploy/templates/helm/scheduling/priorityclass.yaml
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml b/applications/jupyterhub/deploy/templates/helm/scheduling/user-placeholder/pdb.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
rename to applications/jupyterhub/deploy/templates/helm/scheduling/user-placeholder/pdb.yaml
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml b/applications/jupyterhub/deploy/templates/helm/scheduling/user-placeholder/priorityclass.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
rename to applications/jupyterhub/deploy/templates/helm/scheduling/user-placeholder/priorityclass.yaml
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml b/applications/jupyterhub/deploy/templates/helm/scheduling/user-placeholder/statefulset.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
rename to applications/jupyterhub/deploy/templates/helm/scheduling/user-placeholder/statefulset.yaml
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml b/applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/configmap.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
rename to applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/configmap.yaml
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml b/applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/deployment.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
rename to applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/deployment.yaml
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml b/applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/pdb.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
rename to applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/pdb.yaml
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml b/applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/rbac.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
rename to applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/rbac.yaml
diff --git a/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml b/applications/jupyterhub/deploy/templates/helm/singleuser/netpol.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
rename to applications/jupyterhub/deploy/templates/helm/singleuser/netpol.yaml
diff --git a/applications/nfsserver/deploy/templates/_helpers.tpl b/applications/nfsserver/deploy/templates/helm/_helpers.tpl
similarity index 100%
rename from applications/nfsserver/deploy/templates/_helpers.tpl
rename to applications/nfsserver/deploy/templates/helm/_helpers.tpl
diff --git a/applications/nfsserver/deploy/templates/clusterrole.yaml b/applications/nfsserver/deploy/templates/helm/clusterrole.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/clusterrole.yaml
rename to applications/nfsserver/deploy/templates/helm/clusterrole.yaml
diff --git a/applications/nfsserver/deploy/templates/clusterrolebinding.yaml b/applications/nfsserver/deploy/templates/helm/clusterrolebinding.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/clusterrolebinding.yaml
rename to applications/nfsserver/deploy/templates/helm/clusterrolebinding.yaml
diff --git a/applications/nfsserver/deploy/templates/nfs-server.yaml b/applications/nfsserver/deploy/templates/helm/nfs-server.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/nfs-server.yaml
rename to applications/nfsserver/deploy/templates/helm/nfs-server.yaml
diff --git a/applications/nfsserver/deploy/templates/podsecuritypolicy.yaml b/applications/nfsserver/deploy/templates/helm/podsecuritypolicy.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/podsecuritypolicy.yaml
rename to applications/nfsserver/deploy/templates/helm/podsecuritypolicy.yaml
diff --git a/applications/nfsserver/deploy/templates/role.yaml b/applications/nfsserver/deploy/templates/helm/role.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/role.yaml
rename to applications/nfsserver/deploy/templates/helm/role.yaml
diff --git a/applications/nfsserver/deploy/templates/rolebinding.yaml b/applications/nfsserver/deploy/templates/helm/rolebinding.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/rolebinding.yaml
rename to applications/nfsserver/deploy/templates/helm/rolebinding.yaml
diff --git a/applications/nfsserver/deploy/templates/serviceaccount.yaml b/applications/nfsserver/deploy/templates/helm/serviceaccount.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/serviceaccount.yaml
rename to applications/nfsserver/deploy/templates/helm/serviceaccount.yaml
diff --git a/applications/nfsserver/deploy/templates/storageclass.yaml b/applications/nfsserver/deploy/templates/helm/storageclass.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/storageclass.yaml
rename to applications/nfsserver/deploy/templates/helm/storageclass.yaml
diff --git a/applications/sentry/deploy/templates/redis.yaml b/applications/sentry/deploy/templates/helm/redis.yaml
similarity index 100%
rename from applications/sentry/deploy/templates/redis.yaml
rename to applications/sentry/deploy/templates/helm/redis.yaml
diff --git a/deployment-configuration/compose/.helmignore b/deployment-configuration/compose/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/deployment-configuration/compose/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/deployment-configuration/compose/Chart.yaml b/deployment-configuration/compose/Chart.yaml
new file mode 100644
index 00000000..f294c3e7
--- /dev/null
+++ b/deployment-configuration/compose/Chart.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+appVersion: "0.0.1"
+description: CloudHarness Helm Chart
+name: cloudharness
+version: 0.0.1
+maintainers:
+ - name: Filippo Ledda
+ email: filippo@metacell.us
+ - name: Zoran Sinnema
+ email: zoran@metacell.us
diff --git a/deployment-configuration/compose/README.md b/deployment-configuration/compose/README.md
new file mode 100644
index 00000000..abeab69d
--- /dev/null
+++ b/deployment-configuration/compose/README.md
@@ -0,0 +1,4 @@
+# CloudHarness Helm chart: deploy CloudHarness to k8s
+
+Helm is used to define the CloudHarness deployment on Kubernetes. For further information about Helm, see https://helm.sh.
+
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
new file mode 100644
index 00000000..5b4893ba
--- /dev/null
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -0,0 +1,103 @@
+version: '3.7'
+
+services:
+{{- range $app_name, $app_config := .Values.apps }}
+ {{- if has $app_name (list "argo" "nfsserver" "workflows" "events" ) }} {{- /* We deactivate generation for some services */}}
+ {{- continue }}
+ {{- end}}
+ {{ $deployment := $app_config.harness.deployment }}
+ {{ $app_name }}:
+ {{- with $app_config.domain }}
+ domainname: {{ . }}
+ {{- end }}
+ networks:
+ - ch
+ {{- with $app_config.image }}
+ image: {{ . }}
+ {{- end }}
+ {{- with $app_config.harness.service.port }}
+ ports:
+ - "{{ . }}:{{ $app_config.harness.deployment.port }}"
+ {{- end}}
+ deploy:
+ mode: "replicated"
+ replicas: {{ $deployment.replicas | default 1 }}
+ resources:
+ limits:
+ cpus: {{ $deployment.resources.limits.cpu | default "50m" }}
+          memory: {{ trimSuffix "i" ($deployment.resources.limits.memory | default "64Mi") }}
+ reservations:
+ cpus: {{ $deployment.resources.requests.cpu | default "25m" }}
+          memory: {{ trimSuffix "i" ($deployment.resources.requests.memory | default "32Mi") }}
+ environment:
+ - CH_CURRENT_APP_NAME={{ $app_name | quote }}
+
+ {{- range $.Values.env }}
+ - {{ .name }}={{ .value | quote }}
+ {{- end }}
+ {{- /*{{- range $.Values.env }}
+ - {{ .name }}={{ .value | quote }}
+ {{- end }} */}}
+ {{- range $app_config.harness.env }}
+ - {{ .name }}={{ .value | quote }}
+ {{- end }}
+ {{- with $app_config.harness.dependencies.soft }}
+ # links:
+ # {{- range . }}
+ # - {{ . }}
+ # {{- end }}
+ {{- end }}
+ {{- with $app_config.harness.dependencies.hard }}
+ depends_on:
+ {{- range . }}
+ - {{ . }}
+ {{- end }}
+ {{- end }}
+ {{- if or $deployment.volume $app_config.harness.resources }}
+ volumes:
+ {{- with $deployment.volume }}
+ - type: volume
+ source: {{ .name }}
+ target: {{ .mountpath }}
+ {{- end}}
+ {{- with $app_config.harness.resources }}
+ {{- range .}}
+ - type: bind
+ source: compose/resources/{{ $app_name }}/{{.src }}
+ target: {{ .dst }}
+ {{- end }}
+ {{- end}}
+ {{- end }}
+{{- end }}
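+
+{{- /* Illustrative rendered service for a hypothetical app "samples"
+       (image, ports and replica values below are assumptions, not defaults):
+
+  samples:
+    networks:
+      - ch
+    image: localhost:5000/cloudharness/samples:latest
+    ports:
+      - "80:8080"
+    deploy:
+      mode: "replicated"
+      replicas: 1
+    environment:
+      - CH_CURRENT_APP_NAME="samples"
+*/}}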
+
+ traefik:
+ image: "traefik:v2.2"
+ container_name: "traefik"
+ networks:
+ - ch
+ command:
+ - "--log.level=INFO"
+ - "--api.insecure=true"
+ - "--providers.docker=true"
+ - "--providers.docker.exposedbydefault=false"
+ - "--entrypoints.web.address=:80"
+ - "--entrypoints.websecure.address=:443"
+ - "--providers.file.directory=/etc/traefik/dynamic_conf"
+ ports:
+ - "80:80"
+ - "443:443"
+ volumes:
+ - "/var/run/docker.sock:/var/run/docker.sock:ro"
+ - "./certs/:/certs/:ro"
+ - "./traefik.yaml:/etc/traefik/dynamic_conf/conf.yml:ro"
+
+networks:
+ ch:
+ name: ch_network
+
+volumes: # this inclusion needs to be conditional
+{{- range $app_name, $app_config := .Values.apps }}
+ {{- with $app_config.harness.deployment.volume }}
+ {{ .name }}:
+ {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/deployment-configuration/compose/values.yaml b/deployment-configuration/compose/values.yaml
new file mode 100644
index 00000000..434dcac7
--- /dev/null
+++ b/deployment-configuration/compose/values.yaml
@@ -0,0 +1,79 @@
+# -- If set to true, local DNS mapping is added to pods.
+local: false
+# -- Enables/disables Gatekeeper.
+secured_gatekeepers: true
+# -- The root domain.
+domain: ${{DOMAIN}}
+# -- The K8s namespace.
+namespace: ch
+# -- Name of the main app; incoming traffic on the root `domain` is routed to this app.
+mainapp: accounts
+registry:
+ # -- The docker registry.
+ name: "localhost:5000"
+ # -- Optional secret used for pulling from docker registry.
+ secret:
+# -- Docker tag used to pull images.
+tag: latest
+# -- List of applications.
+# @default -- Will be filled automatically.
+apps: {}
+env:
+ # -- Cloud Harness version
+ - name: CH_VERSION
+ value: 0.0.1
+ # -- Cloud harness chart version
+ - name: CH_CHART_VERSION
+ value: 0.0.1
+privenv:
+ # -- Defines a secret as private environment variable that is injected in containers.
+ - name: CH_SECRET
+ value: In God we trust; all others must bring data. ― W. Edwards Deming
+ingress:
+  # -- Flag to enable/disable the ingress controller.
+ enabled: true
+ # -- K8s Name of ingress.
+ name: cloudharness-ingress
+ # -- Enables/disables SSL redirect.
+ ssl_redirect: true
+ letsencrypt:
+ # -- Email for letsencrypt.
+ email: filippo@metacell.us
+backup:
+ # -- Flag to enable/disable backups.
+ active: false
+ # -- Number of days to keep backups.
+ keep_days: "7"
+ # -- Number of weeks to keep backups.
+ keep_weeks: "4"
+ # -- Number of months to keep backups.
+ keep_months: "6"
+ # -- Schedule as cronjob expression.
+ schedule: "*/5 * * * *"
+ # -- The file suffix added to backup files.
+ suffix: ".gz"
+ # -- The volume size for backups (all backups share the same volume)
+ volumesize: "2Gi"
+ # -- Target directory of backups, the mount point of the persistent volume.
+ dir: "/backups"
+ resources:
+ requests:
+ # -- K8s memory resource definition.
+ memory: "32Mi"
+ # -- K8s cpu resource definition.
+ cpu: "25m"
+ limits:
+ # -- K8s memory resource definition.
+ memory: "64Mi"
+ # -- K8s cpu resource definition.
+ cpu: "50m"
+proxy:
+ timeout:
+ # -- Timeout for proxy connections in seconds.
+ send: 60
+ # -- Timeout for proxy responses in seconds.
+ read: 60
+ keepalive: 60
+ payload:
+    # -- Maximum size of payload in MB.
+ max: 250
diff --git a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
new file mode 100644
index 00000000..39ff0272
--- /dev/null
+++ b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
@@ -0,0 +1,753 @@
+"""
+Utilities to create a docker compose configuration from a CloudHarness directory structure
+"""
+import yaml
+import os
+import shutil
+import logging
+from hashlib import sha1
+import subprocess
+from functools import cache
+import tarfile
+from docker import from_env as DockerClient
+from pathlib import Path
+
+
+from . import HERE, CH_ROOT
+from cloudharness_utils.constants import TEST_IMAGES_PATH, VALUES_MANUAL_PATH, HELM_CHART_PATH, APPS_PATH, HELM_PATH, \
+ DEPLOYMENT_CONFIGURATION_PATH, BASE_IMAGES_PATH, STATIC_IMAGES_PATH
+from .utils import get_cluster_ip, get_image_name, env_variable, get_sub_paths, guess_build_dependencies_from_dockerfile, image_name_from_dockerfile_path, \
+ get_template, merge_configuration_directories, merge_to_yaml_file, dict_merge, app_name_from_path, \
+ find_dockerfiles_paths
+
+from .models import HarnessMainConfig
+
+KEY_HARNESS = 'harness'
+KEY_SERVICE = 'service'
+KEY_DATABASE = 'database'
+KEY_DEPLOYMENT = 'deployment'
+KEY_APPS = 'apps'
+KEY_TASK_IMAGES = 'task-images'
+KEY_TEST_IMAGES = 'test-images'
+
+DEFAULT_IGNORE = ('/tasks', '.dockerignore', '.hypothesis', "__pycache__", '.node_modules', 'dist', 'build', '.coverage')
+
+
+def create_docker_compose_configuration(root_paths, tag='latest', registry='', local=True, domain=None, exclude=(), secured=True,
+ output_path='./deployment', include=None, registry_secret=None, tls=True, env=None,
+ namespace=None, templates_path=HELM_PATH) -> HarnessMainConfig:
+    if isinstance(env, str):
+ env = [env]
+ return CloudHarnessHelm(root_paths, tag=tag, registry=registry, local=local, domain=domain, exclude=exclude, secured=secured,
+ output_path=output_path, include=include, registry_secret=registry_secret, tls=tls, env=env,
+ namespace=namespace, templates_path=templates_path).process_values()
+
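+# Illustrative usage (registry, domain and paths below are assumptions, not
+# part of this module):
+#
+#   config = create_docker_compose_configuration(
+#       ['.'], tag='latest', registry='localhost:5000',
+#       domain='mydeployment.local', env='dev')
+#   # -> merges the values files, writes the deployment configuration under
+#   #    ./deployment and returns the resulting HarnessMainConfig
+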
+
+class CloudHarnessHelm:
+ def __init__(self, root_paths, tag='latest', registry='', local=True, domain=None, exclude=(), secured=True,
+ output_path='./deployment', include=None, registry_secret=None, tls=True, env=None,
+ namespace=None, templates_path=HELM_PATH):
+ assert domain, 'A domain must be specified'
+ self.root_paths = [Path(r) for r in root_paths]
+ self.tag = tag
+ if not registry.endswith('/'):
+ self.registry = f'{registry}/'
+ else:
+ self.registry = registry
+ self.local = local
+ self.domain = domain
+ self.exclude = exclude
+ self.secured = secured
+ self.output_path = Path(output_path)
+ self.include = include
+ self.registry_secret = registry_secret
+ self.tls = tls
+ self.env = env
+ self.namespace = namespace
+
+ self.templates_path = templates_path
+ self.dest_deployment_path = self.output_path / templates_path
+ self.helm_chart_path = self.dest_deployment_path / 'Chart.yaml'
+ self.__init_deployment()
+
+ self.static_images = set()
+ self.base_images = {}
+ self.all_images = {}
+
+ def __init_deployment(self):
+ """
+ Create the base helm chart
+ """
+ if self.dest_deployment_path.exists():
+ shutil.rmtree(self.dest_deployment_path)
+ # Initialize with default
+ copy_merge_base_deployment(self.dest_deployment_path, Path(CH_ROOT) / DEPLOYMENT_CONFIGURATION_PATH / self.templates_path)
+
+ # Override for every cloudharness scaffolding
+ for root_path in self.root_paths:
+ copy_merge_base_deployment(dest_helm_chart_path=self.dest_deployment_path,
+ base_helm_chart=root_path / DEPLOYMENT_CONFIGURATION_PATH /self.templates_path)
+ collect_apps_helm_templates(root_path, exclude=self.exclude, include=self.include,
+ dest_helm_chart_path=self.dest_deployment_path, templates_path=self.templates_path)
+
+ def __adjust_missing_values(self, helm_values):
+ if 'name' not in helm_values:
+ with open(self.helm_chart_path) as f:
+ chart_idx_content = yaml.safe_load(f)
+ helm_values['name'] = chart_idx_content['name'].lower()
+
+ def process_values(self) -> HarnessMainConfig:
+ """
+ Creates values file for the helm chart
+ """
+ helm_values = self.__get_default_helm_values()
+
+ self.__adjust_missing_values(helm_values)
+
+ helm_values = self.__merge_base_helm_values(helm_values)
+
+ helm_values[KEY_APPS] = {}
+
+ base_image_name = helm_values['name']
+
+ helm_values[KEY_TASK_IMAGES] = {}
+
+ self.__init_base_images(base_image_name)
+ self.__init_static_images(base_image_name)
+ helm_values[KEY_TEST_IMAGES] = self.__init_test_images(base_image_name)
+
+ self.__process_applications(helm_values, base_image_name)
+
+ # self.create_tls_certificate(helm_values)
+
+ values, include = self.__finish_helm_values(values=helm_values)
+
+ # Adjust dependencies from static (common) images
+ self.__assign_static_build_dependencies(helm_values)
+
+ for root_path in self.root_paths:
+ collect_apps_helm_templates(root_path, exclude=self.exclude, include=self.include,
+ dest_helm_chart_path=self.dest_deployment_path, templates_path=self.templates_path)
+
+ # Save values file for manual helm chart
+ merged_values = merge_to_yaml_file(helm_values, self.dest_deployment_path / VALUES_MANUAL_PATH)
+ if self.namespace:
+ merge_to_yaml_file({'metadata': {'namespace': self.namespace},
+ 'name': helm_values['name']}, self.helm_chart_path)
+ validate_helm_values(merged_values)
+ return HarnessMainConfig.from_dict(merged_values)
+
+ def __process_applications(self, helm_values, base_image_name):
+ for root_path in self.root_paths:
+ app_values = init_app_values(
+ root_path, exclude=self.exclude, values=helm_values[KEY_APPS])
+ helm_values[KEY_APPS] = dict_merge(helm_values[KEY_APPS],
+ app_values)
+
+ app_base_path = root_path / APPS_PATH
+ app_values = self.collect_app_values(
+ f"{app_base_path}", base_image_name=base_image_name)
+ helm_values[KEY_APPS] = dict_merge(helm_values[KEY_APPS],
+ app_values)
+
+ def collect_app_values(self, app_base_path, base_image_name=None):
+ values = {}
+
+ for app_path in get_sub_paths(app_base_path):
+ app_name = app_name_from_path(
+ os.path.relpath(app_path, app_base_path))
+
+ if app_name in self.exclude:
+ continue
+ app_key = app_name.replace('-', '_')
+
+ app_values = self.create_app_values_spec(app_name, app_path, base_image_name=base_image_name)
+
+ values[app_key] = dict_merge(
+ values[app_key], app_values) if app_key in values else app_values
+
+ return values
+
+ def __init_static_images(self, base_image_name):
+ for static_img_dockerfile in self.static_images:
+ img_name = image_name_from_dockerfile_path(os.path.basename(
+ static_img_dockerfile), base_name=base_image_name)
+ self.base_images[os.path.basename(static_img_dockerfile)] = self.image_tag(
+ img_name, build_context_path=static_img_dockerfile)
+
+ def __assign_static_build_dependencies(self, helm_values):
+ for static_img_dockerfile in self.static_images:
+ key = os.path.basename(static_img_dockerfile)
+ if key in helm_values[KEY_TASK_IMAGES]:
+ dependencies = guess_build_dependencies_from_dockerfile(
+ static_img_dockerfile)
+ for dep in dependencies:
+ if dep in self.base_images and dep not in helm_values[KEY_TASK_IMAGES]:
+ helm_values[KEY_TASK_IMAGES][dep] = self.base_images[dep]
+
+ for image_name in list(helm_values[KEY_TASK_IMAGES].keys()):
+ if image_name in self.exclude:
+ del helm_values[KEY_TASK_IMAGES][image_name]
+
+ def __init_base_images(self, base_image_name):
+
+ for root_path in self.root_paths:
+ for base_img_dockerfile in self.__find_static_dockerfile_paths(root_path):
+ img_name = image_name_from_dockerfile_path(
+ os.path.basename(base_img_dockerfile), base_name=base_image_name)
+ self.base_images[os.path.basename(base_img_dockerfile)] = self.image_tag(
+ img_name, build_context_path=root_path)
+
+ self.static_images.update(find_dockerfiles_paths(
+ os.path.join(root_path, STATIC_IMAGES_PATH)))
+ return self.base_images
+
+ def __init_test_images(self, base_image_name):
+ test_images = {}
+ for root_path in self.root_paths:
+ for base_img_dockerfile in find_dockerfiles_paths(os.path.join(root_path, TEST_IMAGES_PATH)):
+ img_name = image_name_from_dockerfile_path(
+ os.path.basename(base_img_dockerfile), base_name=base_image_name)
+ test_images[os.path.basename(base_img_dockerfile)] = self.image_tag(
+ img_name, build_context_path=base_img_dockerfile)
+
+ return test_images
+
+
+ def __find_static_dockerfile_paths(self, root_path):
+ return find_dockerfiles_paths(os.path.join(root_path, BASE_IMAGES_PATH)) + find_dockerfiles_paths(os.path.join(root_path, STATIC_IMAGES_PATH))
+
+ def __merge_base_helm_values(self, helm_values):
+ # Override for every cloudharness scaffolding
+ for root_path in self.root_paths:
+ helm_values = dict_merge(
+ helm_values,
+ collect_helm_values(root_path, env=self.env)
+ )
+
+ return helm_values
+
+ def __get_default_helm_values(self):
+ helm_values = get_template(os.path.join(
+ CH_ROOT, DEPLOYMENT_CONFIGURATION_PATH, HELM_PATH, 'values.yaml'))
+ helm_values = dict_merge(helm_values,
+ collect_helm_values(CH_ROOT, env=self.env))
+
+ return helm_values
+
+ def create_tls_certificate(self, helm_values):
+ if not self.tls:
+ helm_values['tls'] = None
+ return
+ if not self.local:
+ return
+ helm_values['tls'] = self.domain.replace(".", "-") + "-tls"
+
+ bootstrap_file = 'bootstrap.sh'
+ certs_parent_folder_path = self.output_path / 'helm' / 'resources'
+ certs_folder_path = certs_parent_folder_path / 'certs'
+
+ # if os.path.exists(os.path.join(certs_folder_path)):
+ if certs_folder_path.exists():
+ # don't overwrite the certificate if it exists
+ return
+
+ try:
+ client = DockerClient()
+ client.ping()
+        except Exception:
+            raise ConnectionRefusedError(
+                '\n\nIs docker running? Run "eval $(minikube docker-env)" if you are using minikube...')
+
+ # Create CA and sign cert for domain
+ container = client.containers.run(image='frapsoft/openssl',
+                                          command='sleep 60',
+ entrypoint="",
+ detach=True,
+ environment=[
+ f"DOMAIN={self.domain}"],
+ )
+
+ container.exec_run('mkdir -p /mnt/vol1')
+ container.exec_run('mkdir -p /mnt/certs')
+
+ # copy bootstrap file
+ cur_dir = os.getcwd()
+ os.chdir(os.path.join(HERE, 'scripts'))
+ tar = tarfile.open(bootstrap_file + '.tar', mode='w')
+ try:
+ tar.add(bootstrap_file)
+ finally:
+ tar.close()
+        with open(bootstrap_file + '.tar', 'rb') as f:
+            data = f.read()
+        container.put_archive('/mnt/vol1', data)
+ os.chdir(cur_dir)
+ container.exec_run(f'tar x {bootstrap_file}.tar', workdir='/mnt/vol1')
+
+ # exec bootstrap file
+ container.exec_run(f'/bin/ash /mnt/vol1/{bootstrap_file}')
+
+ # retrieve the certs from the container
+ bits, stat = container.get_archive('/mnt/certs')
+ if not certs_folder_path.exists():
+ certs_folder_path.mkdir(parents=True)
+ with open(certs_parent_folder_path / 'certs.tar', 'wb') as f:
+ for chunk in bits:
+ f.write(chunk)
+ cf = tarfile.open(f'{certs_parent_folder_path}/certs.tar')
+ cf.extractall(path=certs_parent_folder_path)
+
+ logs = container.logs()
+ logging.info(f'openssl container logs: {logs}')
+
+ # stop the container
+ container.kill()
+
+ logging.info("Created certificates for local deployment")
+
+ def __finish_helm_values(self, values):
+ """
+        Applies defaults and overrides to the merged helm values
+ """
+ if self.registry:
+ logging.info(f"Registry set: {self.registry}")
+ if self.local:
+ values['registry']['secret'] = ''
+ if self.registry_secret:
+            logging.info("Registry secret set")
+ values['registry']['name'] = self.registry
+ values['registry']['secret'] = self.registry_secret
+ values['tag'] = self.tag
+ if self.namespace:
+ values['namespace'] = self.namespace
+ values['secured_gatekeepers'] = self.secured
+ values['ingress']['ssl_redirect'] = values['ingress']['ssl_redirect'] and self.tls
+ values['tls'] = self.tls
+ if self.domain:
+ values['domain'] = self.domain
+
+ values['local'] = self.local
+ if self.local:
+ try:
+ values['localIp'] = get_cluster_ip()
+ except subprocess.TimeoutExpired:
+ logging.warning("Minikube not available")
+            except Exception:
+ logging.warning("Kubectl not available")
+
+ apps = values[KEY_APPS]
+
+ for app_key in apps:
+ v = apps[app_key]
+
+ values_from_legacy(v)
+ assert KEY_HARNESS in v, 'Default app value loading is broken'
+
+ app_name = app_key.replace('_', '-')
+ harness = v[KEY_HARNESS]
+ harness['name'] = app_name
+
+ if not harness[KEY_SERVICE].get('name', None):
+ harness[KEY_SERVICE]['name'] = app_name
+ if not harness[KEY_DEPLOYMENT].get('name', None):
+ harness[KEY_DEPLOYMENT]['name'] = app_name
+
+ if harness[KEY_DATABASE] and not harness[KEY_DATABASE].get('name', None):
+ harness[KEY_DATABASE]['name'] = app_name.strip() + '-db'
+
+ self.__clear_unused_db_configuration(harness)
+ values_set_legacy(v)
+
+ if self.include:
+ self.include = get_included_with_dependencies(
+ values, set(self.include))
+ logging.info('Selecting included applications')
+
+            for v in list(apps):
+                if apps[v]['harness']['name'] not in self.include:
+                    del apps[v]
+                    continue
+                values[KEY_TASK_IMAGES].update(apps[v][KEY_TASK_IMAGES])
+        else:
+            for v in list(apps):
+                values[KEY_TASK_IMAGES].update(apps[v][KEY_TASK_IMAGES])
+        # Create environment variables
+        create_env_variables(values)
+ return values, self.include
+
+ def __clear_unused_db_configuration(self, harness_config):
+ database_config = harness_config[KEY_DATABASE]
+ database_type = database_config.get('type', None)
+ if database_type is None:
+ del harness_config[KEY_DATABASE]
+ return
+ db_specific_keys = [k for k, v in database_config.items()
+ if isinstance(v, dict) and 'image' in v and 'ports' in v]
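+        # Heuristic used above: sub-dicts carrying both 'image' and 'ports' are
+        # engine-specific blocks; every engine other than the configured 'type'
+        # is removed below.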
+ for db in db_specific_keys:
+ if database_type != db:
+ del database_config[db]
+
+ def image_tag(self, image_name, build_context_path=None, dependencies=()):
+ tag = self.tag
+ if tag is None and not self.local:
+ logging.info(f"Generating tag for {image_name} from {build_context_path} and {dependencies}")
+ ignore_path = os.path.join(build_context_path, '.dockerignore')
+ ignore = set(DEFAULT_IGNORE)
+ if os.path.exists(ignore_path):
+ with open(ignore_path) as f:
+ ignore = ignore.union({line.strip() for line in f})
+ logging.info(f"Ignoring {ignore}")
+ tag = generate_tag_from_content(build_context_path, ignore)
+ logging.info(f"Content hash: {tag}")
+ dependencies = dependencies or guess_build_dependencies_from_dockerfile(build_context_path)
+            tag = sha1((tag + "".join(self.all_images.get(n, '') for n in dependencies)).encode("utf-8")).hexdigest()
+ logging.info(f"Generated tag: {tag}")
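+            # Worked example (illustrative): if the build context hashes to "abc"
+            # and the only dependency image is tagged "def", the final tag is
+            # sha1("abcdef"), so changing either the app content or a dependency
+            # image yields a new tag and forces a rebuild.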
+ app_name = image_name.split("/")[-1] # the image name can have a prefix
+ self.all_images[app_name] = tag
+ return self.registry + image_name + (f':{tag}' if tag else '')
+
+ def create_app_values_spec(self, app_name, app_path, base_image_name=None):
+        logging.info('Generating values spec for ' + app_name)
+
+ specific_template_path = os.path.join(app_path, 'deploy', 'values.yaml')
+ if os.path.exists(specific_template_path):
+ logging.info("Specific values template found: " +
+ specific_template_path)
+ values = get_template(specific_template_path)
+ else:
+ values = {}
+
+ for e in self.env:
+ specific_template_path = os.path.join(
+ app_path, 'deploy', f'values-{e}.yaml')
+ if os.path.exists(specific_template_path):
+ logging.info(
+ "Specific environment values template found: " + specific_template_path)
+ with open(specific_template_path) as f:
+ values_env_specific = yaml.safe_load(f)
+ values = dict_merge(values, values_env_specific)
+
+ if KEY_HARNESS in values and 'name' in values[KEY_HARNESS] and values[KEY_HARNESS]['name']:
+ logging.warning('Name is automatically set in applications: name %s will be ignored',
+ values[KEY_HARNESS]['name'])
+
+ image_paths = [path for path in find_dockerfiles_paths(
+ app_path) if 'tasks/' not in path and 'subapps' not in path]
+ if len(image_paths) > 1:
+ logging.warning('Multiple Dockerfiles found in application %s. Picking the first one: %s', app_name,
+ image_paths[0])
+ if KEY_HARNESS in values and 'dependencies' in values[KEY_HARNESS] and 'build' in values[KEY_HARNESS]['dependencies']:
+ build_dependencies = values[KEY_HARNESS]['dependencies']['build']
+ else:
+ build_dependencies = []
+
+ if len(image_paths) > 0:
+ image_name = image_name_from_dockerfile_path(os.path.relpath(
+ image_paths[0], os.path.dirname(app_path)), base_image_name)
+
+ values['image'] = self.image_tag(
+ image_name, build_context_path=app_path, dependencies=build_dependencies)
+        elif KEY_HARNESS in values and not values[KEY_HARNESS].get(KEY_DEPLOYMENT, {}).get('image', None) \
+                and values[KEY_HARNESS].get(KEY_DEPLOYMENT, {}).get('auto', False):
+            raise Exception(f"At least one Dockerfile must be specified for application {app_name}. "
+                            f"Specify the harness.deployment.image value if you intend to use a prebuilt image.")
+
+ task_images_paths = [path for path in find_dockerfiles_paths(
+ app_path) if 'tasks/' in path]
+ values[KEY_TASK_IMAGES] = values.get(KEY_TASK_IMAGES, {})
+
+ if build_dependencies:
+ for build_dependency in values[KEY_HARNESS]['dependencies']['build']:
+ if build_dependency in self.base_images:
+ values[KEY_TASK_IMAGES][build_dependency] = self.base_images[build_dependency]
+
+ for task_path in task_images_paths:
+ task_name = app_name_from_path(os.path.relpath(
+ task_path, os.path.dirname(app_path)))
+ img_name = image_name_from_dockerfile_path(task_name, base_image_name)
+
+ values[KEY_TASK_IMAGES][task_name] = self.image_tag(
+ img_name, build_context_path=task_path, dependencies=values[KEY_TASK_IMAGES].keys())
+
+ return values
+
+
+def get_included_with_dependencies(values, include):
+ app_values = values['apps'].values()
+ directly_included = [app for app in app_values if any(
+ inc == app[KEY_HARNESS]['name'] for inc in include)]
+
+ dependent = set(include)
+ for app in directly_included:
+ if app['harness']['dependencies'].get('hard', None):
+ dependent.update(set(app[KEY_HARNESS]['dependencies']['hard']))
+ if app['harness']['dependencies'].get('soft', None):
+ dependent.update(set(app[KEY_HARNESS]['dependencies']['soft']))
+ if values['secured_gatekeepers'] and app[KEY_HARNESS]['secured']:
+ dependent.add('accounts')
+ if len(dependent) == len(include):
+ return dependent
+ return get_included_with_dependencies(values, dependent)
+
+
+def merge_helm_chart(source_templates_path, dest_helm_chart_path=HELM_CHART_PATH):
+ pass
+
+
+def collect_apps_helm_templates(search_root, dest_helm_chart_path, templates_path, exclude=(), include=None):
+ """
+    Searches recursively for helm templates inside the applications and collects them in the destination chart
+
+    :param search_root: root path containing the applications folder
+    :param dest_helm_chart_path: collected helm templates destination folder
+    :param templates_path: name of the engine-specific templates subfolder to collect
+ :param exclude:
+ :return:
+ """
+ app_base_path = os.path.join(search_root, APPS_PATH)
+
+ for app_path in get_sub_paths(app_base_path):
+ app_name = app_name_from_path(os.path.relpath(app_path, app_base_path))
+ if app_name in exclude or (include and not any(inc in app_name for inc in include)):
+ continue
+ template_dir = os.path.join(app_path, 'deploy', 'templates', templates_path)
+ if os.path.exists(template_dir):
+ dest_dir = os.path.join(
+ dest_helm_chart_path, 'templates', app_name)
+
+ logging.info(
+ "Collecting templates for application %s to %s", app_name, dest_dir)
+ if os.path.exists(dest_dir):
+ logging.warning(
+ "Merging/overriding all files in directory %s", dest_dir)
+ merge_configuration_directories(template_dir, dest_dir)
+ else:
+ shutil.copytree(template_dir, dest_dir)
+ resources_dir = os.path.join(app_path, 'deploy/resources')
+ if os.path.exists(resources_dir):
+ dest_dir = os.path.join(
+ dest_helm_chart_path, 'resources', app_name)
+
+ logging.info(
+ "Collecting resources for application %s to %s", app_name, dest_dir)
+
+ merge_configuration_directories(resources_dir, dest_dir)
+
+ subchart_dir = os.path.join(app_path, 'deploy/charts')
+ if os.path.exists(subchart_dir):
+ dest_dir = os.path.join(dest_helm_chart_path, 'charts', app_name)
+
+ logging.info(
+ "Collecting templates for application %s to %s", app_name, dest_dir)
+ if os.path.exists(dest_dir):
+ logging.warning(
+ "Merging/overriding all files in directory %s", dest_dir)
+ merge_configuration_directories(subchart_dir, dest_dir)
+ else:
+ shutil.copytree(subchart_dir, dest_dir)
+
+
+def copy_merge_base_deployment(dest_helm_chart_path, base_helm_chart):
+ if not os.path.exists(base_helm_chart):
+ return
+ if os.path.exists(dest_helm_chart_path):
+ logging.info("Merging/overriding all files in directory %s",
+ dest_helm_chart_path)
+ merge_configuration_directories(f"{base_helm_chart}", f"{dest_helm_chart_path}")
+ else:
+ logging.info("Copying base deployment chart from %s to %s",
+ base_helm_chart, dest_helm_chart_path)
+ shutil.copytree(base_helm_chart, dest_helm_chart_path)
+
+
+def collect_helm_values(deployment_root, env=()):
+ """
+ Creates helm values from a cloudharness deployment scaffolding
+ """
+
+ values_template_path = os.path.join(
+ deployment_root, DEPLOYMENT_CONFIGURATION_PATH, 'values-template.yaml')
+
+ values = get_template(values_template_path)
+
+ for e in env:
+ specific_template_path = os.path.join(deployment_root, DEPLOYMENT_CONFIGURATION_PATH,
+ f'values-template-{e}.yaml')
+ if os.path.exists(specific_template_path):
+ logging.info(
+ "Specific environment values template found: " + specific_template_path)
+ with open(specific_template_path) as f:
+ values_env_specific = yaml.safe_load(f)
+ values = dict_merge(values, values_env_specific)
+ return values
+
+
+def init_app_values(deployment_root, exclude, values=None):
+ values = values if values is not None else {}
+ app_base_path = os.path.join(deployment_root, APPS_PATH)
+ overridden_template_path = os.path.join(
+ deployment_root, DEPLOYMENT_CONFIGURATION_PATH, 'value-template.yaml')
+ default_values_path = os.path.join(
+ CH_ROOT, DEPLOYMENT_CONFIGURATION_PATH, 'value-template.yaml')
+
+ for app_path in get_sub_paths(app_base_path):
+
+ app_name = app_name_from_path(os.path.relpath(app_path, app_base_path))
+
+ if app_name in exclude:
+ continue
+ app_key = app_name.replace('-', '_')
+ if app_key not in values:
+ default_values = get_template(default_values_path)
+ values[app_key] = default_values
+ overridden_defaults = get_template(overridden_template_path)
+ values[app_key] = dict_merge(values[app_key], overridden_defaults)
+
+ return values
+
+
+def values_from_legacy(values):
+ if KEY_HARNESS not in values:
+ values[KEY_HARNESS] = {}
+ harness = values[KEY_HARNESS]
+ if KEY_SERVICE not in harness:
+ harness[KEY_SERVICE] = {}
+ if KEY_DEPLOYMENT not in harness:
+ harness[KEY_DEPLOYMENT] = {}
+ if KEY_DATABASE not in harness:
+ harness[KEY_DATABASE] = {}
+
+ if 'subdomain' in values:
+ harness['subdomain'] = values['subdomain']
+ if 'autodeploy' in values:
+ harness[KEY_DEPLOYMENT]['auto'] = values['autodeploy']
+ if 'autoservice' in values:
+ harness[KEY_SERVICE]['auto'] = values['autoservice']
+ if 'secureme' in values:
+ harness['secured'] = values['secureme']
+ if 'resources' in values:
+        harness[KEY_DEPLOYMENT].setdefault('resources', {}).update(values['resources'])
+ if 'replicas' in values:
+ harness[KEY_DEPLOYMENT]['replicas'] = values['replicas']
+ if 'image' in values:
+ harness[KEY_DEPLOYMENT]['image'] = values['image']
+ if 'port' in values:
+ harness[KEY_DEPLOYMENT]['port'] = values['port']
+ harness[KEY_SERVICE]['port'] = values['port']
+
+
+def values_set_legacy(values):
+ harness = values[KEY_HARNESS]
+ if 'image' in harness[KEY_DEPLOYMENT]:
+ values['image'] = harness[KEY_DEPLOYMENT]['image']
+
+ values['name'] = harness['name']
+ if harness[KEY_DEPLOYMENT].get('port', None):
+ values['port'] = harness[KEY_DEPLOYMENT]['port']
+ if 'resources' in harness[KEY_DEPLOYMENT]:
+ values['resources'] = harness[KEY_DEPLOYMENT]['resources']
+
+
+def generate_tag_from_content(content_path, ignore=()):
+ from dirhash import dirhash
+ return dirhash(content_path, 'sha1', ignore=ignore)
+
+
+def extract_env_variables_from_values(values, envs=tuple(), prefix=''):
+ if isinstance(values, dict):
+ newenvs = list(envs)
+ for key, value in values.items():
+ v = extract_env_variables_from_values(
+ value, envs, f"{prefix}_{key}".replace('-', '_').upper())
+ if key in ('name', 'port', 'subdomain'):
+ newenvs.extend(v)
+ return newenvs
+ else:
+ return [env_variable(prefix, values)]
+
+
+def create_env_variables(values):
+ for app_name, value in values[KEY_APPS].items():
+ if KEY_HARNESS in value:
+ values['env'].extend(extract_env_variables_from_values(
+ value[KEY_HARNESS], prefix='CH_' + app_name))
+ values['env'].append(env_variable('CH_DOMAIN', values['domain']))
+ values['env'].append(env_variable(
+ 'CH_IMAGE_REGISTRY', values['registry']['name']))
+ values['env'].append(env_variable('CH_IMAGE_TAG', values['tag']))
+
+
+def hosts_info(values):
+ domain = values['domain']
+ namespace = values['namespace']
+    subdomains = [app[KEY_HARNESS]['subdomain'] for app in values[KEY_APPS].values()
+                  if KEY_HARNESS in app and app[KEY_HARNESS]['subdomain']]
+    subdomains += [alias for app in values[KEY_APPS].values()
+                   if KEY_HARNESS in app and app[KEY_HARNESS]['aliases']
+                   for alias in app[KEY_HARNESS]['aliases']]
+ try:
+ ip = get_cluster_ip()
+    except Exception:
+ logging.warning('Cannot get cluster ip')
+ return
+    logging.info(
+        f"\nTo test locally, update your hosts file:\n{ip}\t{domain} {' '.join(sd + '.' + domain for sd in subdomains)}")
+
+ deployments = (app[KEY_HARNESS][KEY_DEPLOYMENT]['name']
+ for app in values[KEY_APPS].values() if KEY_HARNESS in app)
+
+    logging.info(
+        "\nTo run some applications locally, the following port-forwards and host entries may also be needed:")
+ for appname in values[KEY_APPS]:
+ app = values[KEY_APPS][appname]['harness']
+ if 'deployment' not in app:
+ continue
+ print(
+ "kubectl port-forward -n {namespace} deployment/{app} {port}:{port}".format(
+ app=app['deployment']['name'], port=app['deployment']['port'], namespace=namespace))
+
+    print(
+        f"127.0.0.1\t{' '.join(f'{s}.{namespace}' for s in deployments)}")
+
+
+class ValuesValidationException(Exception):
+ pass
+
+
+def validate_helm_values(values):
+ validate_dependencies(values)
+
+
+def validate_dependencies(values):
+    all_apps = set(values["apps"])
+ for app in all_apps:
+ app_values = values["apps"][app]
+ if 'dependencies' in app_values[KEY_HARNESS]:
+ soft_dependencies = {
+ d.replace("-", "_") for d in app_values[KEY_HARNESS]['dependencies']['soft']}
+ not_found = {d for d in soft_dependencies if d not in all_apps}
+ if not_found:
+ logging.warning(
+ f"Soft dependencies specified for application {app} not found: {','.join(not_found)}")
+ hard_dependencies = {
+ d.replace("-", "_") for d in app_values[KEY_HARNESS]['dependencies']['hard']}
+ not_found = {d for d in hard_dependencies if d not in all_apps}
+ if not_found:
+ raise ValuesValidationException(
+ f"Bad application dependencies specified for application {app}: {','.join(not_found)}")
+
+            build_dependencies = set(app_values[KEY_HARNESS]['dependencies']['build'])
+
+ not_found = {
+ d for d in build_dependencies if d not in values[KEY_TASK_IMAGES]}
+ not_found = {d for d in not_found if d not in all_apps}
+ if not_found:
+ raise ValuesValidationException(
+ f"Bad build dependencies specified for application {app}: {','.join(not_found)} not found as built image")
+
+ if 'use_services' in app_values[KEY_HARNESS]:
+ service_dependencies = {d['name'].replace(
+ "-", "_") for d in app_values[KEY_HARNESS]['use_services']}
+
+ not_found = {d for d in service_dependencies if d not in all_apps}
+ if not_found:
+ raise ValuesValidationException(
+ f"Bad service application dependencies specified for application {app}: {','.join(not_found)}")
diff --git a/tools/deployment-cli-tools/ch_cli_tools/helm.py b/tools/deployment-cli-tools/ch_cli_tools/helm.py
index 4c75a909..9bd43b8c 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/helm.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/helm.py
@@ -212,7 +212,7 @@ def __init_base_images(self, base_image_name):
self.static_images.update(find_dockerfiles_paths(
os.path.join(root_path, STATIC_IMAGES_PATH)))
return self.base_images
-
+
def __init_test_images(self, base_image_name):
test_images = {}
for root_path in self.root_paths:
@@ -224,7 +224,7 @@ def __init_test_images(self, base_image_name):
return test_images
-
+
def __find_static_dockerfile_paths(self, root_path):
return find_dockerfiles_paths(os.path.join(root_path, BASE_IMAGES_PATH)) + find_dockerfiles_paths(os.path.join(root_path, STATIC_IMAGES_PATH))
@@ -417,7 +417,7 @@ def image_tag(self, image_name, build_context_path=None, dependencies=()):
app_name = image_name.split("/")[-1] # the image name can have a prefix
self.all_images[app_name] = tag
return self.registry + image_name + (f':{tag}' if tag else '')
-
+
def create_app_values_spec(self, app_name, app_path, base_image_name=None):
logging.info('Generating values script for ' + app_name)
@@ -456,7 +456,7 @@ def create_app_values_spec(self, app_name, app_path, base_image_name=None):
if len(image_paths) > 0:
image_name = image_name_from_dockerfile_path(os.path.relpath(
image_paths[0], os.path.dirname(app_path)), base_image_name)
-
+
values['image'] = self.image_tag(
image_name, build_context_path=app_path, dependencies=build_dependencies)
elif KEY_HARNESS in values and not values[KEY_HARNESS].get(KEY_DEPLOYMENT, {}).get('image', None) and values[
@@ -521,7 +521,7 @@ def collect_apps_helm_templates(search_root, dest_helm_chart_path, exclude=(), i
app_name = app_name_from_path(os.path.relpath(app_path, app_base_path))
if app_name in exclude or (include and not any(inc in app_name for inc in include)):
continue
- template_dir = os.path.join(app_path, 'deploy/templates')
+ template_dir = os.path.join(app_path, 'deploy', 'templates', HELM_PATH)
if os.path.exists(template_dir):
dest_dir = os.path.join(
dest_helm_chart_path, 'templates', app_name)
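The hunk above narrows template collection to an engine-specific subfolder. A tiny sketch of the old and new lookup paths, assuming HELM_PATH keeps the value "helm" it has in cloudharness_utils.constants and an illustrative application path:

import os

HELM_PATH = "helm"  # mirrors cloudharness_utils.constants.HELM_PATH
app_path = "applications/samples"  # illustrative

old_dir = os.path.join(app_path, 'deploy/templates')
new_dir = os.path.join(app_path, 'deploy', 'templates', HELM_PATH)
print(old_dir)  # applications/samples/deploy/templates
print(new_dir)  # applications/samples/deploy/templates/helm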
diff --git a/tools/deployment-cli-tools/harness-deployment b/tools/deployment-cli-tools/harness-deployment
index a9cecabb..d8aaebda 100644
--- a/tools/deployment-cli-tools/harness-deployment
+++ b/tools/deployment-cli-tools/harness-deployment
@@ -4,6 +4,7 @@ import logging
import sys
import os
+from ch_cli_tools.dockercompose import create_docker_compose_configuration
from ch_cli_tools.helm import create_helm_chart, hosts_info, deploy
from ch_cli_tools.skaffold import create_skaffold_configuration, create_vscode_debug_configuration
from ch_cli_tools.codefresh import create_codefresh_deployment_scripts, write_env_file
@@ -61,6 +62,8 @@ if __name__ == "__main__":
help=f'Do not generate ci/cd files')
parser.add_argument('-we', '--write-env', dest='write_env', action="store_const", default=None, const=True,
help=f'Write build env to .env file in {DEPLOYMENT_PATH}')
+ parser.add_argument('--docker-compose', dest='docker_compose', action="store_true",
+ help='Generate docker-compose.yaml and dedicated Skaffold configuration')
args, unknown = parser.parse_known_args(sys.argv[1:])
@@ -81,7 +84,24 @@ if __name__ == "__main__":
merge_app_directories(root_paths, destination=args.merge)
root_paths = [args.merge]
- helm_values = create_helm_chart(
+ # helm_values = create_helm_chart(
+ # root_paths,
+ # tag=args.tag,
+ # registry=args.registry,
+ # domain=args.domain,
+ # local=args.local,
+ # secured=not args.unsecured,
+ # output_path=args.output_path,
+ # exclude=args.exclude,
+ # include=args.include,
+ # registry_secret=args.registry_secret,
+ # tls=not args.no_tls,
+ # env=envs,
+ # namespace=args.namespace
+ # )
+
+ if not args.docker_compose:
+ helm_values = create_helm_chart(
root_paths,
tag=args.tag,
registry=args.registry,
@@ -96,6 +116,23 @@ if __name__ == "__main__":
env=envs,
namespace=args.namespace
)
+ else:
+ helm_values = create_docker_compose_configuration(
+ root_paths,
+ tag=args.tag,
+ registry=args.registry,
+ domain=args.domain,
+ local=args.local,
+ secured=not args.unsecured,
+ output_path=args.output_path,
+ exclude=args.exclude,
+ include=args.include,
+ registry_secret=args.registry_secret,
+ tls=not args.no_tls,
+ env=envs,
+ namespace=args.namespace,
+ templates_path="compose",
+ )
merged_root_paths = preprocess_build_overrides(
root_paths=root_paths, helm_values=helm_values)
@@ -108,7 +145,7 @@ if __name__ == "__main__":
envs=envs,
base_image_name=helm_values['name'],
helm_values=helm_values)
-
+
if args.write_env:
write_env_file(helm_values, os.path.join(root_paths[-1], DEPLOYMENT_PATH, ".env"))
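The new --docker-compose flag only selects which builder produces helm_values; the rest of the pipeline is unchanged. A reduced, runnable sketch of the dispatch, with the builders stubbed out as plain names since the real ones need a full deployment scaffolding:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--docker-compose', dest='docker_compose', action='store_true',
                    help='Generate docker-compose.yaml and dedicated Skaffold configuration')
args = parser.parse_args(['--docker-compose'])  # simulate passing the flag

# The real script calls create_helm_chart / create_docker_compose_configuration here.
builder = 'create_docker_compose_configuration' if args.docker_compose else 'create_helm_chart'
print(builder)  # -> create_docker_compose_configuration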
From 6f306a18a2f07a6ba78501946d9be3887244b162 Mon Sep 17 00:00:00 2001
From: aranega
Date: Wed, 7 Feb 2024 09:24:29 -0600
Subject: [PATCH 011/210] Squashed commit of the following:
commit c698bbadf4f5cf41a59818d3738258fb29919249
Author: aranega
Date: Wed Feb 7 08:55:45 2024 -0600
CH-100 Add second path using pathlib
commit 0422bfe9860f272354c1faadd851d37b4976650a
Author: aranega
Date: Wed Feb 7 07:33:43 2024 -0600
CH-100 Add first port to pathlib
---
.../ch_cli_tools/dockercompose.py | 92 +++++++++----------
tools/deployment-cli-tools/harness-deployment | 1 -
2 files changed, 45 insertions(+), 48 deletions(-)
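This squashed commit ports dockercompose.py from os.path calls to pathlib (diff below); the translation is mostly mechanical. A minimal sketch of the pattern, with illustrative constant values:

import os
from pathlib import Path

CH_ROOT = '/opt/cloudharness'  # illustrative; the real value comes from ch_cli_tools
DEPLOYMENT_CONFIGURATION_PATH = 'deployment-configuration'
HELM_PATH = 'helm'

old_style = os.path.join(CH_ROOT, DEPLOYMENT_CONFIGURATION_PATH, HELM_PATH, 'values.yaml')
new_style = Path(CH_ROOT) / DEPLOYMENT_CONFIGURATION_PATH / HELM_PATH / 'values.yaml'
assert old_style == str(new_style)
print(new_style.exists())  # os.path.exists(p) becomes p.exists()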
diff --git a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
index 39ff0272..06bf6d23 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
@@ -31,16 +31,16 @@
KEY_TEST_IMAGES = 'test-images'
DEFAULT_IGNORE = ('/tasks', '.dockerignore', '.hypothesis', "__pycache__", '.node_modules', 'dist', 'build', '.coverage')
-
+COMPOSE = 'compose'
def create_docker_compose_configuration(root_paths, tag='latest', registry='', local=True, domain=None, exclude=(), secured=True,
output_path='./deployment', include=None, registry_secret=None, tls=True, env=None,
- namespace=None, templates_path=HELM_PATH) -> HarnessMainConfig:
+ namespace=None) -> HarnessMainConfig:
if (type(env)) == str:
env = [env]
return CloudHarnessHelm(root_paths, tag=tag, registry=registry, local=local, domain=domain, exclude=exclude, secured=secured,
output_path=output_path, include=include, registry_secret=registry_secret, tls=tls, env=env,
- namespace=namespace, templates_path=templates_path).process_values()
+ namespace=namespace, templates_path=COMPOSE).process_values()
class CloudHarnessHelm:
@@ -146,16 +146,15 @@ def __process_applications(self, helm_values, base_image_name):
app_base_path = root_path / APPS_PATH
app_values = self.collect_app_values(
- f"{app_base_path}", base_image_name=base_image_name)
+ app_base_path, base_image_name=base_image_name)
helm_values[KEY_APPS] = dict_merge(helm_values[KEY_APPS],
app_values)
def collect_app_values(self, app_base_path, base_image_name=None):
values = {}
- for app_path in get_sub_paths(app_base_path):
- app_name = app_name_from_path(
- os.path.relpath(app_path, app_base_path))
+        for app_path in app_base_path.glob("*/"):  # only the direct sub-directories
+ app_name = app_name_from_path(f"{app_path.relative_to(app_base_path)}")
if app_name in self.exclude:
continue
@@ -185,7 +184,7 @@ def __assign_static_build_dependencies(self, helm_values):
if dep in self.base_images and dep not in helm_values[KEY_TASK_IMAGES]:
helm_values[KEY_TASK_IMAGES][dep] = self.base_images[dep]
- for image_name in list(helm_values[KEY_TASK_IMAGES].keys()):
+ for image_name in helm_values[KEY_TASK_IMAGES].keys():
if image_name in self.exclude:
del helm_values[KEY_TASK_IMAGES][image_name]
@@ -228,10 +227,11 @@ def __merge_base_helm_values(self, helm_values):
return helm_values
def __get_default_helm_values(self):
- helm_values = get_template(os.path.join(
- CH_ROOT, DEPLOYMENT_CONFIGURATION_PATH, HELM_PATH, 'values.yaml'))
+ ch_root_path = Path(CH_ROOT)
+ values_yaml_path = ch_root_path / DEPLOYMENT_CONFIGURATION_PATH / HELM_PATH / 'values.yaml'
+ helm_values = get_template(values_yaml_path)
helm_values = dict_merge(helm_values,
- collect_helm_values(CH_ROOT, env=self.env))
+ collect_helm_values(ch_root_path, env=self.env))
return helm_values
@@ -273,7 +273,7 @@ def create_tls_certificate(self, helm_values):
# copy bootstrap file
cur_dir = os.getcwd()
- os.chdir(os.path.join(HERE, 'scripts'))
+ os.chdir(Path(HERE) / 'scripts')
tar = tarfile.open(bootstrap_file + '.tar', mode='w')
try:
tar.add(bootstrap_file)
@@ -291,10 +291,11 @@ def create_tls_certificate(self, helm_values):
bits, stat = container.get_archive('/mnt/certs')
if not certs_folder_path.exists():
certs_folder_path.mkdir(parents=True)
- with open(certs_parent_folder_path / 'certs.tar', 'wb') as f:
+ certs_tar = certs_parent_folder_path / 'certs.tar'
+ with open(certs_tar, 'wb') as f:
for chunk in bits:
f.write(chunk)
- cf = tarfile.open(f'{certs_parent_folder_path}/certs.tar')
+ cf = tarfile.open(certs_tar)
cf.extractall(path=certs_parent_folder_path)
logs = container.logs()
@@ -409,20 +410,19 @@ def image_tag(self, image_name, build_context_path=None, dependencies=()):
def create_app_values_spec(self, app_name, app_path, base_image_name=None):
logging.info('Generating values script for ' + app_name)
- specific_template_path = os.path.join(app_path, 'deploy', 'values.yaml')
- if os.path.exists(specific_template_path):
- logging.info("Specific values template found: " +
- specific_template_path)
+ deploy_path = app_path / 'deploy'
+ specific_template_path = deploy_path / 'values.yaml'
+ if specific_template_path.exists():
+ logging.info(f"Specific values template found: {specific_template_path}")
values = get_template(specific_template_path)
else:
values = {}
for e in self.env:
- specific_template_path = os.path.join(
- app_path, 'deploy', f'values-{e}.yaml')
- if os.path.exists(specific_template_path):
+ specific_template_path = deploy_path / f'values-{e}.yaml'
+ if specific_template_path.exists():
logging.info(
- "Specific environment values template found: " + specific_template_path)
+ f"Specific environment values template found: {specific_template_path}")
with open(specific_template_path) as f:
values_env_specific = yaml.safe_load(f)
values = dict_merge(values, values_env_specific)
@@ -433,6 +433,8 @@ def create_app_values_spec(self, app_name, app_path, base_image_name=None):
image_paths = [path for path in find_dockerfiles_paths(
app_path) if 'tasks/' not in path and 'subapps' not in path]
+ import ipdb; ipdb.set_trace() # fmt: skip
+
if len(image_paths) > 1:
logging.warning('Multiple Dockerfiles found in application %s. Picking the first one: %s', app_name,
image_paths[0])
@@ -463,7 +465,7 @@ def create_app_values_spec(self, app_name, app_path, base_image_name=None):
for task_path in task_images_paths:
task_name = app_name_from_path(os.path.relpath(
- task_path, os.path.dirname(app_path)))
+ task_path, app_path.parent))
img_name = image_name_from_dockerfile_path(task_name, base_image_name)
values[KEY_TASK_IMAGES][task_name] = self.image_tag(
@@ -503,53 +505,51 @@ def collect_apps_helm_templates(search_root, dest_helm_chart_path, templates_pat
:param exclude:
:return:
"""
- app_base_path = os.path.join(search_root, APPS_PATH)
+ app_base_path = search_root / APPS_PATH
- for app_path in get_sub_paths(app_base_path):
- app_name = app_name_from_path(os.path.relpath(app_path, app_base_path))
+    for app_path in app_base_path.glob("*/"):  # only the direct sub-directories
+ app_name = app_name_from_path(os.path.relpath(f"{app_path}", app_base_path))
if app_name in exclude or (include and not any(inc in app_name for inc in include)):
continue
- template_dir = os.path.join(app_path, 'deploy', 'templates', templates_path)
- if os.path.exists(template_dir):
- dest_dir = os.path.join(
- dest_helm_chart_path, 'templates', app_name)
+ template_dir = app_path / 'deploy' / 'templates' / templates_path
+ if template_dir.exists():
+ dest_dir = dest_helm_chart_path / 'templates' / app_name
logging.info(
"Collecting templates for application %s to %s", app_name, dest_dir)
- if os.path.exists(dest_dir):
+ if dest_dir.exists():
logging.warning(
"Merging/overriding all files in directory %s", dest_dir)
- merge_configuration_directories(template_dir, dest_dir)
+ merge_configuration_directories(f"{template_dir}", f"{dest_dir}")
else:
shutil.copytree(template_dir, dest_dir)
- resources_dir = os.path.join(app_path, 'deploy/resources')
- if os.path.exists(resources_dir):
- dest_dir = os.path.join(
- dest_helm_chart_path, 'resources', app_name)
+ resources_dir = app_path / 'deploy' / 'resources'
+ if resources_dir.exists():
+ dest_dir = dest_helm_chart_path / 'resources' / app_name
logging.info(
"Collecting resources for application %s to %s", app_name, dest_dir)
- merge_configuration_directories(resources_dir, dest_dir)
+ merge_configuration_directories(f"{resources_dir}", f"{dest_dir}")
- subchart_dir = os.path.join(app_path, 'deploy/charts')
- if os.path.exists(subchart_dir):
- dest_dir = os.path.join(dest_helm_chart_path, 'charts', app_name)
+ subchart_dir = app_path / 'deploy/charts'
+ if subchart_dir.exists():
+ dest_dir = dest_helm_chart_path / 'charts' / app_name
logging.info(
"Collecting templates for application %s to %s", app_name, dest_dir)
- if os.path.exists(dest_dir):
+ if dest_dir.exists():
logging.warning(
"Merging/overriding all files in directory %s", dest_dir)
- merge_configuration_directories(subchart_dir, dest_dir)
+ merge_configuration_directories(f"{subchart_dir}", f"{dest_dir}")
else:
shutil.copytree(subchart_dir, dest_dir)
def copy_merge_base_deployment(dest_helm_chart_path, base_helm_chart):
- if not os.path.exists(base_helm_chart):
+ if not base_helm_chart.exists():
return
- if os.path.exists(dest_helm_chart_path):
+ if dest_helm_chart_path.exists():
logging.info("Merging/overriding all files in directory %s",
dest_helm_chart_path)
merge_configuration_directories(f"{base_helm_chart}", f"{dest_helm_chart_path}")
@@ -563,9 +563,7 @@ def collect_helm_values(deployment_root, env=()):
"""
Creates helm values from a cloudharness deployment scaffolding
"""
-
- values_template_path = os.path.join(
- deployment_root, DEPLOYMENT_CONFIGURATION_PATH, 'values-template.yaml')
+ values_template_path = deployment_root / DEPLOYMENT_CONFIGURATION_PATH / 'values-template.yaml'
values = get_template(values_template_path)
diff --git a/tools/deployment-cli-tools/harness-deployment b/tools/deployment-cli-tools/harness-deployment
index d8aaebda..e5cf49f1 100644
--- a/tools/deployment-cli-tools/harness-deployment
+++ b/tools/deployment-cli-tools/harness-deployment
@@ -131,7 +131,6 @@ if __name__ == "__main__":
tls=not args.no_tls,
env=envs,
namespace=args.namespace,
- templates_path="compose",
)
merged_root_paths = preprocess_build_overrides(
From 6bbae19137873aa6970ae36d77680b2f0d750d3c Mon Sep 17 00:00:00 2001
From: aranega
Date: Wed, 7 Feb 2024 10:24:48 -0600
Subject: [PATCH 012/210] CH-100 Add first dedicated skaffold generation for
docker compose
---
.../cloudharness_utils/constants.py | 2 +
.../ch_cli_tools/dockercompose.py | 47 +++-
.../ch_cli_tools/skaffoldcompose.py | 251 ++++++++++++++++++
tools/deployment-cli-tools/harness-deployment | 6 +-
4 files changed, 300 insertions(+), 6 deletions(-)
create mode 100644 tools/deployment-cli-tools/ch_cli_tools/skaffoldcompose.py
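Reduced to its essence, the new skaffoldcompose module (full file below) keeps the build artifacts but swaps skaffold's helm release deployer for the docker deployer with compose enabled. A runnable reduction with illustrative image names:

artifacts = {
    'samples': {'image': 'cloudharness/samples'},
    'accounts': {'image': 'cloudharness/accounts'},
}

skaffold_conf = {'deploy': {'helm': {'releases': []}}, 'build': {}}
# Replace the helm deployer with skaffold's docker/compose deployer:
skaffold_conf['deploy'] = {
    'docker': {
        'useCompose': True,
        'images': [a['image'] for a in artifacts.values() if a['image']],
    }
}
skaffold_conf['build']['artifacts'] = list(artifacts.values())
print(skaffold_conf['deploy']['docker']['images'])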
diff --git a/libraries/cloudharness-utils/cloudharness_utils/constants.py b/libraries/cloudharness-utils/cloudharness_utils/constants.py
index 4b42761a..168b7811 100644
--- a/libraries/cloudharness-utils/cloudharness_utils/constants.py
+++ b/libraries/cloudharness-utils/cloudharness_utils/constants.py
@@ -10,6 +10,8 @@
HELM_PATH = "helm"
HELM_CHART_PATH = HELM_PATH
+COMPOSE = 'compose'
+
INFRASTRUCTURE_PATH = 'infrastructure'
STATIC_IMAGES_PATH = os.path.join(INFRASTRUCTURE_PATH, 'common-images')
BASE_IMAGES_PATH = os.path.join(INFRASTRUCTURE_PATH, 'base-images')
diff --git a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
index 06bf6d23..2cf768a4 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
@@ -15,10 +15,10 @@
from . import HERE, CH_ROOT
from cloudharness_utils.constants import TEST_IMAGES_PATH, VALUES_MANUAL_PATH, HELM_CHART_PATH, APPS_PATH, HELM_PATH, \
- DEPLOYMENT_CONFIGURATION_PATH, BASE_IMAGES_PATH, STATIC_IMAGES_PATH
+ DEPLOYMENT_CONFIGURATION_PATH, BASE_IMAGES_PATH, STATIC_IMAGES_PATH, COMPOSE
from .utils import get_cluster_ip, get_image_name, env_variable, get_sub_paths, guess_build_dependencies_from_dockerfile, image_name_from_dockerfile_path, \
get_template, merge_configuration_directories, merge_to_yaml_file, dict_merge, app_name_from_path, \
- find_dockerfiles_paths
+ find_dockerfiles_paths, find_file_paths
from .models import HarnessMainConfig
@@ -31,7 +31,6 @@
KEY_TEST_IMAGES = 'test-images'
DEFAULT_IGNORE = ('/tasks', '.dockerignore', '.hypothesis', "__pycache__", '.node_modules', 'dist', 'build', '.coverage')
-COMPOSE = 'compose'
def create_docker_compose_configuration(root_paths, tag='latest', registry='', local=True, domain=None, exclude=(), secured=True,
output_path='./deployment', include=None, registry_secret=None, tls=True, env=None,
@@ -50,7 +49,7 @@ def __init__(self, root_paths, tag='latest', registry='', local=True, domain=Non
assert domain, 'A domain must be specified'
self.root_paths = [Path(r) for r in root_paths]
self.tag = tag
- if not registry.endswith('/'):
+ if registry and not registry.endswith('/'):
self.registry = f'{registry}/'
else:
self.registry = registry
@@ -433,7 +432,10 @@ def create_app_values_spec(self, app_name, app_path, base_image_name=None):
image_paths = [path for path in find_dockerfiles_paths(
app_path) if 'tasks/' not in path and 'subapps' not in path]
- import ipdb; ipdb.set_trace() # fmt: skip
+
+ # Inject entry points commands
+ for image_path in image_paths:
+ self.inject_entry_points_commands(values, image_path, app_path)
if len(image_paths) > 1:
logging.warning('Multiple Dockerfiles found in application %s. Picking the first one: %s', app_name,
@@ -474,6 +476,18 @@ def create_app_values_spec(self, app_name, app_path, base_image_name=None):
return values
+ def inject_entry_points_commands(self, helm_values, image_path, app_path):
+ context_path = os.path.relpath(image_path, '.')
+
+ mains_candidates = find_file_paths(context_path, '__main__.py')
+
+ task_main_file = identify_unicorn_based_main(mains_candidates, app_path)
+
+ if task_main_file:
+ helm_values[KEY_HARNESS]['deployment']['command'] = ['python']
+ helm_values[KEY_HARNESS]['deployment']['args'] = [f'/usr/src/app/{os.path.basename(task_main_file)}/__main__.py']
+
+
def get_included_with_dependencies(values, include):
app_values = values['apps'].values()
directly_included = [app for app in app_values if any(
@@ -749,3 +763,26 @@ def validate_dependencies(values):
if not_found:
raise ValuesValidationException(
f"Bad service application dependencies specified for application {app}: {','.join(not_found)}")
+
+
+def identify_unicorn_based_main(candidates, app_path):
+ import re
+ gunicorn_pattern = re.compile(r"gunicorn")
+ # sort candidates, shortest path first
+    for candidate in sorted(candidates, key=lambda x: len(x.split("/"))):
+ dockerfile_path = f"{candidate}/.."
+ while not os.path.exists(f"{dockerfile_path}/Dockerfile") and os.path.abspath(dockerfile_path) != os.path.abspath(app_path):
+ dockerfile_path += "/.."
+ dockerfile = f"{dockerfile_path}/Dockerfile"
+ if not os.path.exists(dockerfile):
+ continue
+ with open(dockerfile, 'r') as file:
+ if re.search(gunicorn_pattern, file.read()):
+ return candidate
+ requirements = f"{candidate}/../requirements.txt"
+ if not os.path.exists(requirements):
+ continue
+ with open(requirements, 'r') as file:
+ if re.search(gunicorn_pattern, file.read()):
+ return candidate
+ return None
\ No newline at end of file
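The heuristic above, restated as a condensed standalone sketch: from each directory containing a __main__.py (shortest path first), walk up to the nearest Dockerfile without leaving the application root, and accept the candidate if that Dockerfile or a sibling requirements.txt mentions gunicorn. Function names here are mine, and the guard against walking past the filesystem root is an addition not present in the original:

import os

def mentions_gunicorn(path):
    if not os.path.exists(path):
        return False
    with open(path) as f:
        return 'gunicorn' in f.read()

def find_gunicorn_main(candidates, app_path):
    # candidates are directories containing a __main__.py, shortest path first
    for candidate in sorted(candidates, key=lambda c: len(c.split('/'))):
        directory = os.path.dirname(candidate)  # start at the candidate's parent
        while not os.path.exists(os.path.join(directory, 'Dockerfile')) \
                and os.path.abspath(directory) != os.path.abspath(app_path):
            parent = os.path.dirname(directory)
            if parent == directory:  # extra guard: stop at the filesystem root
                break
            directory = parent
        if mentions_gunicorn(os.path.join(directory, 'Dockerfile')):
            return candidate
        if mentions_gunicorn(os.path.join(os.path.dirname(candidate), 'requirements.txt')):
            return candidate
    return None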
diff --git a/tools/deployment-cli-tools/ch_cli_tools/skaffoldcompose.py b/tools/deployment-cli-tools/ch_cli_tools/skaffoldcompose.py
new file mode 100644
index 00000000..27a4701a
--- /dev/null
+++ b/tools/deployment-cli-tools/ch_cli_tools/skaffoldcompose.py
@@ -0,0 +1,251 @@
+import os
+import logging
+import json
+import time
+
+from os.path import join, relpath, basename, exists, abspath
+from cloudharness_model import ApplicationTestConfig, HarnessMainConfig
+
+from cloudharness_utils.constants import APPS_PATH, DEPLOYMENT_CONFIGURATION_PATH, \
+ BASE_IMAGES_PATH, STATIC_IMAGES_PATH, COMPOSE, HELM_PATH
+from .helm import KEY_APPS, KEY_HARNESS, KEY_DEPLOYMENT, KEY_TASK_IMAGES
+from .utils import get_template, dict_merge, find_dockerfiles_paths, app_name_from_path, \
+ find_file_paths, guess_build_dependencies_from_dockerfile, merge_to_yaml_file, get_json_template, get_image_name
+
+def relpath_if(p1, p2):
+ if os.path.isabs(p1):
+ return p1
+ return relpath(p1, p2)
+
+def create_skaffold_compose_configuration(root_paths, helm_values: HarnessMainConfig, output_path='.', manage_task_images=True):
+ skaffold_conf = get_template('skaffold-template.yaml', True)
+ apps = helm_values.apps
+ base_image_name = (helm_values.registry.name or "") + helm_values.name
+ artifacts = {}
+ overrides = {}
+
+ def remove_tag(image_name):
+ return image_name.split(":")[0]
+
+ def get_image_tag(name):
+ return f"{get_image_name(name, base_image_name)}"
+
+ builds = {}
+
+ def build_artifact(image_name, context_path, requirements=None, dockerfile_path=''):
+ artifact_spec = {
+ 'image': image_name,
+ 'context': context_path,
+ 'docker': {
+ 'dockerfile': join(dockerfile_path, 'Dockerfile'),
+ 'buildArgs': {
+ 'REGISTRY': helm_values.registry.name,
+ 'TAG': helm_values.tag,
+ 'NOCACHE': str(time.time())
+ },
+ 'ssh': 'default'
+ }
+ }
+ if requirements:
+ artifact_spec['requires'] = [{'image': get_image_tag(req), 'alias': req.replace('-', '_').upper()} for req
+ in requirements]
+ return artifact_spec
+
+
+ base_images = set()
+
+ def process_build_dockerfile(dockerfile_path, root_path, global_context=False, requirements=None, app_name=None):
+ if app_name is None:
+ app_name = app_name_from_path(basename(dockerfile_path))
+ if app_name in helm_values[KEY_TASK_IMAGES] or app_name.replace("-", "_") in helm_values.apps:
+ context_path = relpath_if(root_path, output_path) if global_context else relpath_if(dockerfile_path, output_path)
+
+ builds[app_name] = context_path
+ base_images.add(get_image_name(app_name))
+ artifacts[app_name] = build_artifact(
+ get_image_tag(app_name),
+ context_path,
+ dockerfile_path=relpath(dockerfile_path, output_path),
+ requirements=requirements or guess_build_dependencies_from_dockerfile(dockerfile_path)
+ )
+
+ for root_path in root_paths:
+ skaffold_conf = dict_merge(skaffold_conf, get_template(
+ join(root_path, DEPLOYMENT_CONFIGURATION_PATH, 'skaffold-template.yaml')))
+
+ base_dockerfiles = find_dockerfiles_paths(
+ join(root_path, BASE_IMAGES_PATH))
+
+ for dockerfile_path in base_dockerfiles:
+ process_build_dockerfile(dockerfile_path, root_path, global_context=True)
+
+ release_config = skaffold_conf['deploy']['helm']['releases'][0]
+ release_config['name'] = helm_values.namespace
+ release_config['namespace'] = helm_values.namespace
+ release_config['artifactOverrides'][KEY_APPS] = {}
+
+ static_images = set()
+ for root_path in root_paths:
+ static_dockerfiles = find_dockerfiles_paths(
+ join(root_path, STATIC_IMAGES_PATH))
+
+ for dockerfile_path in static_dockerfiles:
+ process_build_dockerfile(dockerfile_path, root_path)
+
+
+ for root_path in root_paths:
+ apps_path = join(root_path, APPS_PATH)
+ app_dockerfiles = find_dockerfiles_paths(apps_path)
+
+ release_config['artifactOverrides'][KEY_TASK_IMAGES] = {
+ task_image: remove_tag(helm_values[KEY_TASK_IMAGES][task_image])
+ for task_image in helm_values[KEY_TASK_IMAGES]
+ }
+ for dockerfile_path in app_dockerfiles:
+ app_relative_to_skaffold = os.path.relpath(
+ dockerfile_path, output_path)
+ context_path = os.path.relpath(dockerfile_path, '.')
+ app_relative_to_base = os.path.relpath(dockerfile_path, apps_path)
+ app_name = app_name_from_path(app_relative_to_base)
+ app_key = app_name.replace('-', '_')
+ if app_key not in apps:
+ if 'tasks' in app_relative_to_base and manage_task_images:
+ parent_app_name = app_name_from_path(
+ app_relative_to_base.split('/tasks')[0])
+ parent_app_key = parent_app_name.replace('-', '_')
+
+ if parent_app_key in apps:
+ artifacts[app_key] = build_artifact(get_image_tag(app_name), app_relative_to_skaffold,
+ base_images.union(static_images))
+
+ continue
+
+ build_requirements = apps[app_key][KEY_HARNESS].dependencies.build
+ # app_image_tag = remove_tag(
+ # apps[app_key][KEY_HARNESS][KEY_DEPLOYMENT]['image'])
+ # artifacts[app_key] = build_artifact(
+ # app_image_tag, app_relative_to_skaffold, build_requirements)
+ process_build_dockerfile(dockerfile_path, root_path, requirements=build_requirements, app_name=app_name)
+ app = apps[app_key]
+ if app[KEY_HARNESS][KEY_DEPLOYMENT]['image']:
+ release_config['artifactOverrides']['apps'][app_key] = \
+ {
+ KEY_HARNESS: {
+ KEY_DEPLOYMENT: {
+ 'image': remove_tag(app[KEY_HARNESS][KEY_DEPLOYMENT]['image'])
+ }
+ }
+ }
+
+ mains_candidates = find_file_paths(context_path, '__main__.py')
+
+ def identify_unicorn_based_main(candidates):
+ import re
+ gunicorn_pattern = re.compile(r"gunicorn")
+ # sort candidates, shortest path first
+ for candidate in sorted(candidates,key=lambda x: len(x.split("/"))):
+ dockerfile_path = f"{candidate}/.."
+ while not exists(f"{dockerfile_path}/Dockerfile") and abspath(dockerfile_path) != abspath(root_path):
+ dockerfile_path += "/.."
+ dockerfile = f"{dockerfile_path}/Dockerfile"
+ if not exists(dockerfile):
+ continue
+ with open(dockerfile, 'r') as file:
+ if re.search(gunicorn_pattern, file.read()):
+ return candidate
+ requirements = f"{candidate}/../requirements.txt"
+ if not exists(requirements):
+ continue
+ with open(requirements, 'r') as file:
+ if re.search(gunicorn_pattern, file.read()):
+ return candidate
+ return None
+
+ task_main_file = identify_unicorn_based_main(mains_candidates)
+
+ if task_main_file:
+ release_config['overrides']['apps'][app_key] = \
+ {
+ 'harness': {
+ 'deployment': {
+ 'command': ['python'],
+ 'args': [f'/usr/src/app/{os.path.basename(task_main_file)}/__main__.py']
+ }
+ }
+ }
+
+ test_config: ApplicationTestConfig = helm_values.apps[app_key].harness.test
+ if test_config.unit.enabled and test_config.unit.commands:
+
+ skaffold_conf['test'].append(dict(
+ image=get_image_tag(app_name),
+ custom=[dict(command="docker run $IMAGE " + cmd) for cmd in test_config.unit.commands]
+ ))
+
+
+ del skaffold_conf['deploy']
+ skaffold_conf['deploy'] = {
+ 'docker': {
+ 'useCompose': True,
+ 'images': [artifact['image'] for artifact in artifacts.values() if artifact['image']]
+ }
+ }
+
+ skaffold_conf['build']['artifacts'] = [v for v in artifacts.values()]
+ import ipdb; ipdb.set_trace() # fmt: skip
+
+ merge_to_yaml_file(skaffold_conf, os.path.join(
+ output_path, 'skaffold.yaml'))
+
+ return skaffold_conf
+
+
+def create_vscode_debug_configuration(root_paths, helm_values):
+ logging.info(
+ "Creating VS code cloud build configuration.\nCloud build extension is needed to debug.")
+
+ vscode_launch_path = '.vscode/launch.json'
+
+ vs_conf = get_json_template(vscode_launch_path, True)
+ base_image_name = helm_values.name
+ debug_conf = get_json_template('vscode-debug-template.json', True)
+
+ def get_image_tag(name):
+ return f"{get_image_name(name, base_image_name)}"
+
+ if helm_values.registry.name:
+ base_image_name = helm_values.registry.name + helm_values.name
+ for i in range(len(vs_conf['configurations'])):
+ conf = vs_conf['configurations'][i]
+ if conf['name'] == debug_conf['name']:
+ del vs_conf['configurations'][i]
+ break
+ vs_conf['configurations'].append(debug_conf)
+
+ apps = helm_values.apps
+
+ for root_path in root_paths:
+ apps_path = os.path.join(root_path, 'applications')
+
+ src_root_paths = find_file_paths(apps_path, 'setup.py')
+
+ for path in src_root_paths:
+ app_relative_to_base = os.path.relpath(path, apps_path)
+ app_relative_to_root = os.path.relpath(path, '.')
+ app_name = app_name_from_path(app_relative_to_base.split('/')[0])
+ app_key = app_name.replace('-', '_')
+ if app_key in apps.keys():
+ debug_conf["debug"].append({
+ "image": get_image_tag(app_name),
+ "sourceFileMap": {
+ "justMyCode": False,
+ f"${{workspaceFolder}}/{app_relative_to_root}": apps[app_key].harness.get('sourceRoot',
+ "/usr/src/app"),
+ }
+ })
+
+
+ if not os.path.exists(os.path.dirname(vscode_launch_path)):
+ os.makedirs(os.path.dirname(vscode_launch_path))
+ with open(vscode_launch_path, 'w') as f:
+ json.dump(vs_conf, f, indent=2, sort_keys=True)
\ No newline at end of file
diff --git a/tools/deployment-cli-tools/harness-deployment b/tools/deployment-cli-tools/harness-deployment
index e5cf49f1..97897516 100644
--- a/tools/deployment-cli-tools/harness-deployment
+++ b/tools/deployment-cli-tools/harness-deployment
@@ -7,6 +7,7 @@ import os
from ch_cli_tools.dockercompose import create_docker_compose_configuration
from ch_cli_tools.helm import create_helm_chart, hosts_info, deploy
from ch_cli_tools.skaffold import create_skaffold_configuration, create_vscode_debug_configuration
+from ch_cli_tools.skaffoldcompose import create_skaffold_compose_configuration
from ch_cli_tools.codefresh import create_codefresh_deployment_scripts, write_env_file
from ch_cli_tools.preprocessing import preprocess_build_overrides
from ch_cli_tools.utils import merge_app_directories
@@ -148,7 +149,10 @@ if __name__ == "__main__":
if args.write_env:
write_env_file(helm_values, os.path.join(root_paths[-1], DEPLOYMENT_PATH, ".env"))
- create_skaffold_configuration(merged_root_paths, helm_values)
+ if not args.docker_compose:
+ create_skaffold_configuration(merged_root_paths, helm_values)
+ else:
+ create_skaffold_compose_configuration(merged_root_paths, helm_values)
create_vscode_debug_configuration(root_paths, helm_values)
hosts_info(helm_values)
From 528754579c3a33e993f623cb9e1a4fe9d86748fa Mon Sep 17 00:00:00 2001
From: aranega
Date: Wed, 7 Feb 2024 10:33:07 -0600
Subject: [PATCH 013/210] CH-100 Make skaffold script a little bit more generic
(ugly)
---
.../cloudharness_utils/constants.py | 2 +
.../ch_cli_tools/skaffold.py | 19 +-
.../ch_cli_tools/skaffoldcompose.py | 251 ------------------
tools/deployment-cli-tools/harness-deployment | 21 +-
4 files changed, 18 insertions(+), 275 deletions(-)
delete mode 100644 tools/deployment-cli-tools/ch_cli_tools/skaffoldcompose.py
diff --git a/libraries/cloudharness-utils/cloudharness_utils/constants.py b/libraries/cloudharness-utils/cloudharness_utils/constants.py
index 168b7811..a5163f2b 100644
--- a/libraries/cloudharness-utils/cloudharness_utils/constants.py
+++ b/libraries/cloudharness-utils/cloudharness_utils/constants.py
@@ -9,8 +9,10 @@
HELM_PATH = "helm"
HELM_CHART_PATH = HELM_PATH
+HELM_ENGINE = HELM_PATH
COMPOSE = 'compose'
+COMPOSE_ENGINE = 'docker-compose'
INFRASTRUCTURE_PATH = 'infrastructure'
STATIC_IMAGES_PATH = os.path.join(INFRASTRUCTURE_PATH, 'common-images')
diff --git a/tools/deployment-cli-tools/ch_cli_tools/skaffold.py b/tools/deployment-cli-tools/ch_cli_tools/skaffold.py
index c0de5764..bc66d616 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/skaffold.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/skaffold.py
@@ -7,7 +7,7 @@
from cloudharness_model import ApplicationTestConfig, HarnessMainConfig
from cloudharness_utils.constants import APPS_PATH, DEPLOYMENT_CONFIGURATION_PATH, \
- BASE_IMAGES_PATH, STATIC_IMAGES_PATH
+ BASE_IMAGES_PATH, STATIC_IMAGES_PATH, HELM_ENGINE, COMPOSE_ENGINE
from .helm import KEY_APPS, KEY_HARNESS, KEY_DEPLOYMENT, KEY_TASK_IMAGES
from .utils import get_template, dict_merge, find_dockerfiles_paths, app_name_from_path, \
find_file_paths, guess_build_dependencies_from_dockerfile, merge_to_yaml_file, get_json_template, get_image_name
@@ -17,12 +17,13 @@ def relpath_if(p1, p2):
return p1
return relpath(p1, p2)
-def create_skaffold_configuration(root_paths, helm_values: HarnessMainConfig, output_path='.', manage_task_images=True):
+def create_skaffold_configuration(root_paths, helm_values: HarnessMainConfig, output_path='.', manage_task_images=True, backend_deploy=HELM_ENGINE):
skaffold_conf = get_template('skaffold-template.yaml', True)
apps = helm_values.apps
base_image_name = (helm_values.registry.name or "") + helm_values.name
artifacts = {}
overrides = {}
+ backend = backend_deploy or HELM_ENGINE
def remove_tag(image_name):
return image_name.split(":")[0]
@@ -183,10 +184,18 @@ def identify_unicorn_based_main(candidates):
custom=[dict(command="docker run $IMAGE " + cmd) for cmd in test_config.unit.commands]
))
+ if backend == COMPOSE_ENGINE:
+ del skaffold_conf['deploy']
+ skaffold_conf['deploy'] = {
+ 'docker': {
+ 'useCompose': True,
+ 'images': [artifact['image'] for artifact in artifacts.values() if artifact['image']]
+ }
+ }
- skaffold_conf['build']['artifacts'] = [v for v in artifacts.values()]
- merge_to_yaml_file(skaffold_conf, os.path.join(
- output_path, 'skaffold.yaml'))
+ skaffold_conf['build']['artifacts'] = [v for v in artifacts.values()]
+ merge_to_yaml_file(skaffold_conf, os.path.join(
+ output_path, 'skaffold.yaml'))
return skaffold_conf
diff --git a/tools/deployment-cli-tools/ch_cli_tools/skaffoldcompose.py b/tools/deployment-cli-tools/ch_cli_tools/skaffoldcompose.py
deleted file mode 100644
index 27a4701a..00000000
--- a/tools/deployment-cli-tools/ch_cli_tools/skaffoldcompose.py
+++ /dev/null
@@ -1,251 +0,0 @@
-import os
-import logging
-import json
-import time
-
-from os.path import join, relpath, basename, exists, abspath
-from cloudharness_model import ApplicationTestConfig, HarnessMainConfig
-
-from cloudharness_utils.constants import APPS_PATH, DEPLOYMENT_CONFIGURATION_PATH, \
- BASE_IMAGES_PATH, STATIC_IMAGES_PATH, COMPOSE, HELM_PATH
-from .helm import KEY_APPS, KEY_HARNESS, KEY_DEPLOYMENT, KEY_TASK_IMAGES
-from .utils import get_template, dict_merge, find_dockerfiles_paths, app_name_from_path, \
- find_file_paths, guess_build_dependencies_from_dockerfile, merge_to_yaml_file, get_json_template, get_image_name
-
-def relpath_if(p1, p2):
- if os.path.isabs(p1):
- return p1
- return relpath(p1, p2)
-
-def create_skaffold_compose_configuration(root_paths, helm_values: HarnessMainConfig, output_path='.', manage_task_images=True):
- skaffold_conf = get_template('skaffold-template.yaml', True)
- apps = helm_values.apps
- base_image_name = (helm_values.registry.name or "") + helm_values.name
- artifacts = {}
- overrides = {}
-
- def remove_tag(image_name):
- return image_name.split(":")[0]
-
- def get_image_tag(name):
- return f"{get_image_name(name, base_image_name)}"
-
- builds = {}
-
- def build_artifact(image_name, context_path, requirements=None, dockerfile_path=''):
- artifact_spec = {
- 'image': image_name,
- 'context': context_path,
- 'docker': {
- 'dockerfile': join(dockerfile_path, 'Dockerfile'),
- 'buildArgs': {
- 'REGISTRY': helm_values.registry.name,
- 'TAG': helm_values.tag,
- 'NOCACHE': str(time.time())
- },
- 'ssh': 'default'
- }
- }
- if requirements:
- artifact_spec['requires'] = [{'image': get_image_tag(req), 'alias': req.replace('-', '_').upper()} for req
- in requirements]
- return artifact_spec
-
-
- base_images = set()
-
- def process_build_dockerfile(dockerfile_path, root_path, global_context=False, requirements=None, app_name=None):
- if app_name is None:
- app_name = app_name_from_path(basename(dockerfile_path))
- if app_name in helm_values[KEY_TASK_IMAGES] or app_name.replace("-", "_") in helm_values.apps:
- context_path = relpath_if(root_path, output_path) if global_context else relpath_if(dockerfile_path, output_path)
-
- builds[app_name] = context_path
- base_images.add(get_image_name(app_name))
- artifacts[app_name] = build_artifact(
- get_image_tag(app_name),
- context_path,
- dockerfile_path=relpath(dockerfile_path, output_path),
- requirements=requirements or guess_build_dependencies_from_dockerfile(dockerfile_path)
- )
-
- for root_path in root_paths:
- skaffold_conf = dict_merge(skaffold_conf, get_template(
- join(root_path, DEPLOYMENT_CONFIGURATION_PATH, 'skaffold-template.yaml')))
-
- base_dockerfiles = find_dockerfiles_paths(
- join(root_path, BASE_IMAGES_PATH))
-
- for dockerfile_path in base_dockerfiles:
- process_build_dockerfile(dockerfile_path, root_path, global_context=True)
-
- release_config = skaffold_conf['deploy']['helm']['releases'][0]
- release_config['name'] = helm_values.namespace
- release_config['namespace'] = helm_values.namespace
- release_config['artifactOverrides'][KEY_APPS] = {}
-
- static_images = set()
- for root_path in root_paths:
- static_dockerfiles = find_dockerfiles_paths(
- join(root_path, STATIC_IMAGES_PATH))
-
- for dockerfile_path in static_dockerfiles:
- process_build_dockerfile(dockerfile_path, root_path)
-
-
- for root_path in root_paths:
- apps_path = join(root_path, APPS_PATH)
- app_dockerfiles = find_dockerfiles_paths(apps_path)
-
- release_config['artifactOverrides'][KEY_TASK_IMAGES] = {
- task_image: remove_tag(helm_values[KEY_TASK_IMAGES][task_image])
- for task_image in helm_values[KEY_TASK_IMAGES]
- }
- for dockerfile_path in app_dockerfiles:
- app_relative_to_skaffold = os.path.relpath(
- dockerfile_path, output_path)
- context_path = os.path.relpath(dockerfile_path, '.')
- app_relative_to_base = os.path.relpath(dockerfile_path, apps_path)
- app_name = app_name_from_path(app_relative_to_base)
- app_key = app_name.replace('-', '_')
- if app_key not in apps:
- if 'tasks' in app_relative_to_base and manage_task_images:
- parent_app_name = app_name_from_path(
- app_relative_to_base.split('/tasks')[0])
- parent_app_key = parent_app_name.replace('-', '_')
-
- if parent_app_key in apps:
- artifacts[app_key] = build_artifact(get_image_tag(app_name), app_relative_to_skaffold,
- base_images.union(static_images))
-
- continue
-
- build_requirements = apps[app_key][KEY_HARNESS].dependencies.build
- # app_image_tag = remove_tag(
- # apps[app_key][KEY_HARNESS][KEY_DEPLOYMENT]['image'])
- # artifacts[app_key] = build_artifact(
- # app_image_tag, app_relative_to_skaffold, build_requirements)
- process_build_dockerfile(dockerfile_path, root_path, requirements=build_requirements, app_name=app_name)
- app = apps[app_key]
- if app[KEY_HARNESS][KEY_DEPLOYMENT]['image']:
- release_config['artifactOverrides']['apps'][app_key] = \
- {
- KEY_HARNESS: {
- KEY_DEPLOYMENT: {
- 'image': remove_tag(app[KEY_HARNESS][KEY_DEPLOYMENT]['image'])
- }
- }
- }
-
- mains_candidates = find_file_paths(context_path, '__main__.py')
-
- def identify_unicorn_based_main(candidates):
- import re
- gunicorn_pattern = re.compile(r"gunicorn")
- # sort candidates, shortest path first
- for candidate in sorted(candidates,key=lambda x: len(x.split("/"))):
- dockerfile_path = f"{candidate}/.."
- while not exists(f"{dockerfile_path}/Dockerfile") and abspath(dockerfile_path) != abspath(root_path):
- dockerfile_path += "/.."
- dockerfile = f"{dockerfile_path}/Dockerfile"
- if not exists(dockerfile):
- continue
- with open(dockerfile, 'r') as file:
- if re.search(gunicorn_pattern, file.read()):
- return candidate
- requirements = f"{candidate}/../requirements.txt"
- if not exists(requirements):
- continue
- with open(requirements, 'r') as file:
- if re.search(gunicorn_pattern, file.read()):
- return candidate
- return None
-
- task_main_file = identify_unicorn_based_main(mains_candidates)
-
- if task_main_file:
- release_config['overrides']['apps'][app_key] = \
- {
- 'harness': {
- 'deployment': {
- 'command': ['python'],
- 'args': [f'/usr/src/app/{os.path.basename(task_main_file)}/__main__.py']
- }
- }
- }
-
- test_config: ApplicationTestConfig = helm_values.apps[app_key].harness.test
- if test_config.unit.enabled and test_config.unit.commands:
-
- skaffold_conf['test'].append(dict(
- image=get_image_tag(app_name),
- custom=[dict(command="docker run $IMAGE " + cmd) for cmd in test_config.unit.commands]
- ))
-
-
- del skaffold_conf['deploy']
- skaffold_conf['deploy'] = {
- 'docker': {
- 'useCompose': True,
- 'images': [artifact['image'] for artifact in artifacts.values() if artifact['image']]
- }
- }
-
- skaffold_conf['build']['artifacts'] = [v for v in artifacts.values()]
- import ipdb; ipdb.set_trace() # fmt: skip
-
- merge_to_yaml_file(skaffold_conf, os.path.join(
- output_path, 'skaffold.yaml'))
-
- return skaffold_conf
-
-
-def create_vscode_debug_configuration(root_paths, helm_values):
- logging.info(
- "Creating VS code cloud build configuration.\nCloud build extension is needed to debug.")
-
- vscode_launch_path = '.vscode/launch.json'
-
- vs_conf = get_json_template(vscode_launch_path, True)
- base_image_name = helm_values.name
- debug_conf = get_json_template('vscode-debug-template.json', True)
-
- def get_image_tag(name):
- return f"{get_image_name(name, base_image_name)}"
-
- if helm_values.registry.name:
- base_image_name = helm_values.registry.name + helm_values.name
- for i in range(len(vs_conf['configurations'])):
- conf = vs_conf['configurations'][i]
- if conf['name'] == debug_conf['name']:
- del vs_conf['configurations'][i]
- break
- vs_conf['configurations'].append(debug_conf)
-
- apps = helm_values.apps
-
- for root_path in root_paths:
- apps_path = os.path.join(root_path, 'applications')
-
- src_root_paths = find_file_paths(apps_path, 'setup.py')
-
- for path in src_root_paths:
- app_relative_to_base = os.path.relpath(path, apps_path)
- app_relative_to_root = os.path.relpath(path, '.')
- app_name = app_name_from_path(app_relative_to_base.split('/')[0])
- app_key = app_name.replace('-', '_')
- if app_key in apps.keys():
- debug_conf["debug"].append({
- "image": get_image_tag(app_name),
- "sourceFileMap": {
- "justMyCode": False,
- f"${{workspaceFolder}}/{app_relative_to_root}": apps[app_key].harness.get('sourceRoot',
- "/usr/src/app"),
- }
- })
-
-
- if not os.path.exists(os.path.dirname(vscode_launch_path)):
- os.makedirs(os.path.dirname(vscode_launch_path))
- with open(vscode_launch_path, 'w') as f:
- json.dump(vs_conf, f, indent=2, sort_keys=True)
\ No newline at end of file
diff --git a/tools/deployment-cli-tools/harness-deployment b/tools/deployment-cli-tools/harness-deployment
index 97897516..9a5cc78c 100644
--- a/tools/deployment-cli-tools/harness-deployment
+++ b/tools/deployment-cli-tools/harness-deployment
@@ -7,11 +7,10 @@ import os
from ch_cli_tools.dockercompose import create_docker_compose_configuration
from ch_cli_tools.helm import create_helm_chart, hosts_info, deploy
from ch_cli_tools.skaffold import create_skaffold_configuration, create_vscode_debug_configuration
-from ch_cli_tools.skaffoldcompose import create_skaffold_compose_configuration
from ch_cli_tools.codefresh import create_codefresh_deployment_scripts, write_env_file
from ch_cli_tools.preprocessing import preprocess_build_overrides
from ch_cli_tools.utils import merge_app_directories
-from cloudharness_utils.constants import DEPLOYMENT_PATH
+from cloudharness_utils.constants import DEPLOYMENT_PATH, COMPOSE_ENGINE
HERE = os.path.dirname(os.path.realpath(__file__)).replace(os.path.sep, '/')
ROOT = os.path.dirname(os.path.dirname(HERE)).replace(os.path.sep, '/')
@@ -85,22 +84,6 @@ if __name__ == "__main__":
merge_app_directories(root_paths, destination=args.merge)
root_paths = [args.merge]
- # helm_values = create_helm_chart(
- # root_paths,
- # tag=args.tag,
- # registry=args.registry,
- # domain=args.domain,
- # local=args.local,
- # secured=not args.unsecured,
- # output_path=args.output_path,
- # exclude=args.exclude,
- # include=args.include,
- # registry_secret=args.registry_secret,
- # tls=not args.no_tls,
- # env=envs,
- # namespace=args.namespace
- # )
-
if not args.docker_compose:
helm_values = create_helm_chart(
root_paths,
@@ -152,7 +135,7 @@ if __name__ == "__main__":
if not args.docker_compose:
create_skaffold_configuration(merged_root_paths, helm_values)
else:
- create_skaffold_compose_configuration(merged_root_paths, helm_values)
+ create_skaffold_configuration(merged_root_paths, helm_values, backend_deploy=COMPOSE_ENGINE)
create_vscode_debug_configuration(root_paths, helm_values)
hosts_info(helm_values)
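With patch 013 the dedicated skaffoldcompose module is gone and the single generator branches on backend_deploy. A stub-level sketch of the resulting dispatch (constants mirror the constants.py hunk above; only the deploy section is modeled):

HELM_ENGINE = 'helm'
COMPOSE_ENGINE = 'docker-compose'

def skaffold_deploy_section(backend_deploy=HELM_ENGINE):
    backend = backend_deploy or HELM_ENGINE
    if backend == COMPOSE_ENGINE:
        # compose backend: skaffold drives docker compose directly
        return {'docker': {'useCompose': True, 'images': []}}
    # default helm backend: deploy through a helm release
    return {'helm': {'releases': []}}

assert 'helm' in skaffold_deploy_section()
assert 'docker' in skaffold_deploy_section(COMPOSE_ENGINE)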
From 9f75c9c109f0591ff845c20d370aba21e93fc74e Mon Sep 17 00:00:00 2001
From: aranega
Date: Wed, 7 Feb 2024 11:30:18 -0600
Subject: [PATCH 014/210] CH-100 Fix issue with entrypoint
---
deployment-configuration/compose/templates/auto-compose.yaml | 3 +++
tools/deployment-cli-tools/ch_cli_tools/dockercompose.py | 4 ++--
2 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 5b4893ba..43bd8401 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -29,6 +29,9 @@ services:
reservations:
cpus: {{ $deployment.resources.requests.cpu | default "25m" }}
memory: {{ trimSuffix "i" $deployment.resources.requests.memory | default "32M" }}
+ {{- with $deployment.command }}
+ entrypoint: {{ cat . $deployment.args }}
+ {{- end }}
environment:
- CH_CURRENT_APP_NAME={{ $app_name | quote }}
diff --git a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
index 2cf768a4..a935899e 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
@@ -484,8 +484,8 @@ def inject_entry_points_commands(self, helm_values, image_path, app_path):
task_main_file = identify_unicorn_based_main(mains_candidates, app_path)
if task_main_file:
- helm_values[KEY_HARNESS]['deployment']['command'] = ['python']
- helm_values[KEY_HARNESS]['deployment']['args'] = [f'/usr/src/app/{os.path.basename(task_main_file)}/__main__.py']
+ helm_values[KEY_HARNESS]['deployment']['command'] = 'python'
+ helm_values[KEY_HARNESS]['deployment']['args'] = f'/usr/src/app/{os.path.basename(task_main_file)}/__main__.py'
def get_included_with_dependencies(values, include):
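In compose, entrypoint may be a single string, and the template's `cat . $deployment.args` joins the command and its arguments with a space, which is why the Python side now stores plain strings instead of lists. A rough Python equivalent of the rendered result (paths are examples):

```python
# Rough equivalent of the template's `cat` call: join command and args into
# the single string docker-compose accepts for `entrypoint`.
command = "python"                          # harness.deployment.command
args = "/usr/src/app/myapp/__main__.py"     # harness.deployment.args (example)

entrypoint = " ".join(filter(None, [command, args]))
print(entrypoint)  # python /usr/src/app/myapp/__main__.py
```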
From c159a4f0d266592d4269e4911969065bd7acb764 Mon Sep 17 00:00:00 2001
From: aranega
Date: Thu, 8 Feb 2024 08:18:56 -0600
Subject: [PATCH 015/210] CH-100 Remove generation of chart files for
docker-compose
---
.../ch_cli_tools/dockercompose.py | 24 +++++++++----------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
index a935899e..9abcd565 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
@@ -546,18 +546,18 @@ def collect_apps_helm_templates(search_root, dest_helm_chart_path, templates_pat
merge_configuration_directories(f"{resources_dir}", f"{dest_dir}")
- subchart_dir = app_path / 'deploy/charts'
- if subchart_dir.exists():
- dest_dir = dest_helm_chart_path / 'charts' / app_name
-
- logging.info(
- "Collecting templates for application %s to %s", app_name, dest_dir)
- if dest_dir.exists():
- logging.warning(
- "Merging/overriding all files in directory %s", dest_dir)
- merge_configuration_directories(f"{subchart_dir}", f"{dest_dir}")
- else:
- shutil.copytree(subchart_dir, dest_dir)
+ # subchart_dir = app_path / 'deploy/charts'
+ # if subchart_dir.exists():
+ # dest_dir = dest_helm_chart_path / 'charts' / app_name
+
+ # logging.info(
+ # "Collecting templates for application %s to %s", app_name, dest_dir)
+ # if dest_dir.exists():
+ # logging.warning(
+ # "Merging/overriding all files in directory %s", dest_dir)
+ # merge_configuration_directories(f"{subchart_dir}", f"{dest_dir}")
+ # else:
+ # shutil.copytree(subchart_dir, dest_dir)
def copy_merge_base_deployment(dest_helm_chart_path, base_helm_chart):
From 02bd318b84667e8cfc4a3a94fc0e2020c2ff79ac Mon Sep 17 00:00:00 2001
From: aranega
Date: Fri, 9 Feb 2024 10:34:44 -0600
Subject: [PATCH 016/210] CH-100 Rollback default per-app helm template
directory
---
.../argo/deploy/templates/{helm => }/argo-sa.yaml | 0
.../deploy/templates/{helm => }/broker-config.yml | 0
.../events/deploy/templates/{helm => }/configmap.yaml | 0
.../deploy/templates/{helm => }/deployments.yml | 0
.../events/deploy/templates/{helm => }/roles.yml | 0
.../events/deploy/templates/{helm => }/services.yml | 0
.../events/deploy/templates/{helm => }/zoo-config.yml | 0
.../templates/{helm => }/_helpers-auth-rework.tpl | 0
.../deploy/templates/{helm => }/_helpers-names.tpl | 0
.../deploy/templates/{helm => }/_helpers.tpl | 0
.../templates/{helm => }/hub/_helpers-passwords.tpl | 0
.../deploy/templates/{helm => }/hub/configmap.yaml | 0
.../deploy/templates/{helm => }/hub/deployment.yaml | 0
.../deploy/templates/{helm => }/hub/netpol.yaml | 0
.../deploy/templates/{helm => }/hub/pdb.yaml | 0
.../deploy/templates/{helm => }/hub/pvc.yaml | 0
.../deploy/templates/{helm => }/hub/rbac.yaml | 0
.../deploy/templates/{helm => }/hub/secret.yaml | 0
.../deploy/templates/{helm => }/hub/service.yaml | 0
.../{helm => }/image-puller/_helpers-daemonset.tpl | 0
.../{helm => }/image-puller/daemonset-continuous.yaml | 0
.../{helm => }/image-puller/daemonset-hook.yaml | 0
.../deploy/templates/{helm => }/image-puller/job.yaml | 0
.../templates/{helm => }/image-puller/rbac.yaml | 0
.../templates/{helm => }/proxy/autohttps/_README.txt | 0
.../{helm => }/proxy/autohttps/configmap.yaml | 0
.../{helm => }/proxy/autohttps/deployment.yaml | 0
.../templates/{helm => }/proxy/autohttps/rbac.yaml | 0
.../templates/{helm => }/proxy/autohttps/service.yaml | 0
.../deploy/templates/{helm => }/proxy/deployment.yaml | 0
.../deploy/templates/{helm => }/proxy/netpol.yaml | 0
.../deploy/templates/{helm => }/proxy/pdb.yaml | 0
.../deploy/templates/{helm => }/proxy/secret.yaml | 0
.../deploy/templates/{helm => }/proxy/service.yaml | 0
.../{helm => }/scheduling/_scheduling-helpers.tpl | 0
.../{helm => }/scheduling/priorityclass.yaml | 0
.../{helm => }/scheduling/user-placeholder/pdb.yaml | 0
.../scheduling/user-placeholder/priorityclass.yaml | 0
.../scheduling/user-placeholder/statefulset.yaml | 0
.../scheduling/user-scheduler/configmap.yaml | 0
.../scheduling/user-scheduler/deployment.yaml | 0
.../{helm => }/scheduling/user-scheduler/pdb.yaml | 0
.../{helm => }/scheduling/user-scheduler/rbac.yaml | 0
.../templates/{helm => }/singleuser/netpol.yaml | 0
.../deploy/templates/{helm => }/_helpers.tpl | 0
.../deploy/templates/{helm => }/clusterrole.yaml | 0
.../templates/{helm => }/clusterrolebinding.yaml | 0
.../deploy/templates/{helm => }/nfs-server.yaml | 0
.../templates/{helm => }/podsecuritypolicy.yaml | 0
.../nfsserver/deploy/templates/{helm => }/role.yaml | 0
.../deploy/templates/{helm => }/rolebinding.yaml | 0
.../deploy/templates/{helm => }/serviceaccount.yaml | 0
.../deploy/templates/{helm => }/storageclass.yaml | 0
.../sentry/deploy/templates/{helm => }/redis.yaml | 0
.../compose/templates/auto-compose.yaml | 11 +++++++----
.../ch_cli_tools/dockercompose.py | 2 +-
tools/deployment-cli-tools/ch_cli_tools/helm.py | 2 +-
57 files changed, 9 insertions(+), 6 deletions(-)
rename applications/argo/deploy/templates/{helm => }/argo-sa.yaml (100%)
rename applications/events/deploy/templates/{helm => }/broker-config.yml (100%)
rename applications/events/deploy/templates/{helm => }/configmap.yaml (100%)
rename applications/events/deploy/templates/{helm => }/deployments.yml (100%)
rename applications/events/deploy/templates/{helm => }/roles.yml (100%)
rename applications/events/deploy/templates/{helm => }/services.yml (100%)
rename applications/events/deploy/templates/{helm => }/zoo-config.yml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/_helpers-auth-rework.tpl (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/_helpers-names.tpl (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/_helpers.tpl (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/hub/_helpers-passwords.tpl (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/hub/configmap.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/hub/deployment.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/hub/netpol.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/hub/pdb.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/hub/pvc.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/hub/rbac.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/hub/secret.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/hub/service.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/image-puller/_helpers-daemonset.tpl (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/image-puller/daemonset-continuous.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/image-puller/daemonset-hook.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/image-puller/job.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/image-puller/rbac.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/proxy/autohttps/_README.txt (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/proxy/autohttps/configmap.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/proxy/autohttps/deployment.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/proxy/autohttps/rbac.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/proxy/autohttps/service.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/proxy/deployment.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/proxy/netpol.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/proxy/pdb.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/proxy/secret.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/proxy/service.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/scheduling/_scheduling-helpers.tpl (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/scheduling/priorityclass.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/scheduling/user-placeholder/pdb.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/scheduling/user-placeholder/priorityclass.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/scheduling/user-placeholder/statefulset.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/scheduling/user-scheduler/configmap.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/scheduling/user-scheduler/deployment.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/scheduling/user-scheduler/pdb.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/scheduling/user-scheduler/rbac.yaml (100%)
rename applications/jupyterhub/deploy/templates/{helm => }/singleuser/netpol.yaml (100%)
rename applications/nfsserver/deploy/templates/{helm => }/_helpers.tpl (100%)
rename applications/nfsserver/deploy/templates/{helm => }/clusterrole.yaml (100%)
rename applications/nfsserver/deploy/templates/{helm => }/clusterrolebinding.yaml (100%)
rename applications/nfsserver/deploy/templates/{helm => }/nfs-server.yaml (100%)
rename applications/nfsserver/deploy/templates/{helm => }/podsecuritypolicy.yaml (100%)
rename applications/nfsserver/deploy/templates/{helm => }/role.yaml (100%)
rename applications/nfsserver/deploy/templates/{helm => }/rolebinding.yaml (100%)
rename applications/nfsserver/deploy/templates/{helm => }/serviceaccount.yaml (100%)
rename applications/nfsserver/deploy/templates/{helm => }/storageclass.yaml (100%)
rename applications/sentry/deploy/templates/{helm => }/redis.yaml (100%)
diff --git a/applications/argo/deploy/templates/helm/argo-sa.yaml b/applications/argo/deploy/templates/argo-sa.yaml
similarity index 100%
rename from applications/argo/deploy/templates/helm/argo-sa.yaml
rename to applications/argo/deploy/templates/argo-sa.yaml
diff --git a/applications/events/deploy/templates/helm/broker-config.yml b/applications/events/deploy/templates/broker-config.yml
similarity index 100%
rename from applications/events/deploy/templates/helm/broker-config.yml
rename to applications/events/deploy/templates/broker-config.yml
diff --git a/applications/events/deploy/templates/helm/configmap.yaml b/applications/events/deploy/templates/configmap.yaml
similarity index 100%
rename from applications/events/deploy/templates/helm/configmap.yaml
rename to applications/events/deploy/templates/configmap.yaml
diff --git a/applications/events/deploy/templates/helm/deployments.yml b/applications/events/deploy/templates/deployments.yml
similarity index 100%
rename from applications/events/deploy/templates/helm/deployments.yml
rename to applications/events/deploy/templates/deployments.yml
diff --git a/applications/events/deploy/templates/helm/roles.yml b/applications/events/deploy/templates/roles.yml
similarity index 100%
rename from applications/events/deploy/templates/helm/roles.yml
rename to applications/events/deploy/templates/roles.yml
diff --git a/applications/events/deploy/templates/helm/services.yml b/applications/events/deploy/templates/services.yml
similarity index 100%
rename from applications/events/deploy/templates/helm/services.yml
rename to applications/events/deploy/templates/services.yml
diff --git a/applications/events/deploy/templates/helm/zoo-config.yml b/applications/events/deploy/templates/zoo-config.yml
similarity index 100%
rename from applications/events/deploy/templates/helm/zoo-config.yml
rename to applications/events/deploy/templates/zoo-config.yml
diff --git a/applications/jupyterhub/deploy/templates/helm/_helpers-auth-rework.tpl b/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/_helpers-auth-rework.tpl
rename to applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
diff --git a/applications/jupyterhub/deploy/templates/helm/_helpers-names.tpl b/applications/jupyterhub/deploy/templates/_helpers-names.tpl
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/_helpers-names.tpl
rename to applications/jupyterhub/deploy/templates/_helpers-names.tpl
diff --git a/applications/jupyterhub/deploy/templates/helm/_helpers.tpl b/applications/jupyterhub/deploy/templates/_helpers.tpl
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/_helpers.tpl
rename to applications/jupyterhub/deploy/templates/_helpers.tpl
diff --git a/applications/jupyterhub/deploy/templates/helm/hub/_helpers-passwords.tpl b/applications/jupyterhub/deploy/templates/hub/_helpers-passwords.tpl
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/hub/_helpers-passwords.tpl
rename to applications/jupyterhub/deploy/templates/hub/_helpers-passwords.tpl
diff --git a/applications/jupyterhub/deploy/templates/helm/hub/configmap.yaml b/applications/jupyterhub/deploy/templates/hub/configmap.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/hub/configmap.yaml
rename to applications/jupyterhub/deploy/templates/hub/configmap.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/hub/deployment.yaml b/applications/jupyterhub/deploy/templates/hub/deployment.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/hub/deployment.yaml
rename to applications/jupyterhub/deploy/templates/hub/deployment.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/hub/netpol.yaml b/applications/jupyterhub/deploy/templates/hub/netpol.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/hub/netpol.yaml
rename to applications/jupyterhub/deploy/templates/hub/netpol.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/hub/pdb.yaml b/applications/jupyterhub/deploy/templates/hub/pdb.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/hub/pdb.yaml
rename to applications/jupyterhub/deploy/templates/hub/pdb.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/hub/pvc.yaml b/applications/jupyterhub/deploy/templates/hub/pvc.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/hub/pvc.yaml
rename to applications/jupyterhub/deploy/templates/hub/pvc.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/hub/rbac.yaml b/applications/jupyterhub/deploy/templates/hub/rbac.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/hub/rbac.yaml
rename to applications/jupyterhub/deploy/templates/hub/rbac.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/hub/secret.yaml b/applications/jupyterhub/deploy/templates/hub/secret.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/hub/secret.yaml
rename to applications/jupyterhub/deploy/templates/hub/secret.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/hub/service.yaml b/applications/jupyterhub/deploy/templates/hub/service.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/hub/service.yaml
rename to applications/jupyterhub/deploy/templates/hub/service.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/image-puller/_helpers-daemonset.tpl b/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/image-puller/_helpers-daemonset.tpl
rename to applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
diff --git a/applications/jupyterhub/deploy/templates/helm/image-puller/daemonset-continuous.yaml b/applications/jupyterhub/deploy/templates/image-puller/daemonset-continuous.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/image-puller/daemonset-continuous.yaml
rename to applications/jupyterhub/deploy/templates/image-puller/daemonset-continuous.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/image-puller/daemonset-hook.yaml b/applications/jupyterhub/deploy/templates/image-puller/daemonset-hook.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/image-puller/daemonset-hook.yaml
rename to applications/jupyterhub/deploy/templates/image-puller/daemonset-hook.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/image-puller/job.yaml b/applications/jupyterhub/deploy/templates/image-puller/job.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/image-puller/job.yaml
rename to applications/jupyterhub/deploy/templates/image-puller/job.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/image-puller/rbac.yaml b/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/image-puller/rbac.yaml
rename to applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/proxy/autohttps/_README.txt b/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/proxy/autohttps/_README.txt
rename to applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt
diff --git a/applications/jupyterhub/deploy/templates/helm/proxy/autohttps/configmap.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/proxy/autohttps/configmap.yaml
rename to applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/proxy/autohttps/deployment.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/proxy/autohttps/deployment.yaml
rename to applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/proxy/autohttps/rbac.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/proxy/autohttps/rbac.yaml
rename to applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/proxy/autohttps/service.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/proxy/autohttps/service.yaml
rename to applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/proxy/deployment.yaml b/applications/jupyterhub/deploy/templates/proxy/deployment.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/proxy/deployment.yaml
rename to applications/jupyterhub/deploy/templates/proxy/deployment.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/proxy/netpol.yaml b/applications/jupyterhub/deploy/templates/proxy/netpol.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/proxy/netpol.yaml
rename to applications/jupyterhub/deploy/templates/proxy/netpol.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/proxy/pdb.yaml b/applications/jupyterhub/deploy/templates/proxy/pdb.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/proxy/pdb.yaml
rename to applications/jupyterhub/deploy/templates/proxy/pdb.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/proxy/secret.yaml b/applications/jupyterhub/deploy/templates/proxy/secret.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/proxy/secret.yaml
rename to applications/jupyterhub/deploy/templates/proxy/secret.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/proxy/service.yaml b/applications/jupyterhub/deploy/templates/proxy/service.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/proxy/service.yaml
rename to applications/jupyterhub/deploy/templates/proxy/service.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/scheduling/_scheduling-helpers.tpl b/applications/jupyterhub/deploy/templates/scheduling/_scheduling-helpers.tpl
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/scheduling/_scheduling-helpers.tpl
rename to applications/jupyterhub/deploy/templates/scheduling/_scheduling-helpers.tpl
diff --git a/applications/jupyterhub/deploy/templates/helm/scheduling/priorityclass.yaml b/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/scheduling/priorityclass.yaml
rename to applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/scheduling/user-placeholder/pdb.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/scheduling/user-placeholder/pdb.yaml
rename to applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/scheduling/user-placeholder/priorityclass.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/scheduling/user-placeholder/priorityclass.yaml
rename to applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/scheduling/user-placeholder/statefulset.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/scheduling/user-placeholder/statefulset.yaml
rename to applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/configmap.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/configmap.yaml
rename to applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/deployment.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/deployment.yaml
rename to applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/pdb.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/pdb.yaml
rename to applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/rbac.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/scheduling/user-scheduler/rbac.yaml
rename to applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
diff --git a/applications/jupyterhub/deploy/templates/helm/singleuser/netpol.yaml b/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
similarity index 100%
rename from applications/jupyterhub/deploy/templates/helm/singleuser/netpol.yaml
rename to applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
diff --git a/applications/nfsserver/deploy/templates/helm/_helpers.tpl b/applications/nfsserver/deploy/templates/_helpers.tpl
similarity index 100%
rename from applications/nfsserver/deploy/templates/helm/_helpers.tpl
rename to applications/nfsserver/deploy/templates/_helpers.tpl
diff --git a/applications/nfsserver/deploy/templates/helm/clusterrole.yaml b/applications/nfsserver/deploy/templates/clusterrole.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/helm/clusterrole.yaml
rename to applications/nfsserver/deploy/templates/clusterrole.yaml
diff --git a/applications/nfsserver/deploy/templates/helm/clusterrolebinding.yaml b/applications/nfsserver/deploy/templates/clusterrolebinding.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/helm/clusterrolebinding.yaml
rename to applications/nfsserver/deploy/templates/clusterrolebinding.yaml
diff --git a/applications/nfsserver/deploy/templates/helm/nfs-server.yaml b/applications/nfsserver/deploy/templates/nfs-server.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/helm/nfs-server.yaml
rename to applications/nfsserver/deploy/templates/nfs-server.yaml
diff --git a/applications/nfsserver/deploy/templates/helm/podsecuritypolicy.yaml b/applications/nfsserver/deploy/templates/podsecuritypolicy.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/helm/podsecuritypolicy.yaml
rename to applications/nfsserver/deploy/templates/podsecuritypolicy.yaml
diff --git a/applications/nfsserver/deploy/templates/helm/role.yaml b/applications/nfsserver/deploy/templates/role.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/helm/role.yaml
rename to applications/nfsserver/deploy/templates/role.yaml
diff --git a/applications/nfsserver/deploy/templates/helm/rolebinding.yaml b/applications/nfsserver/deploy/templates/rolebinding.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/helm/rolebinding.yaml
rename to applications/nfsserver/deploy/templates/rolebinding.yaml
diff --git a/applications/nfsserver/deploy/templates/helm/serviceaccount.yaml b/applications/nfsserver/deploy/templates/serviceaccount.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/helm/serviceaccount.yaml
rename to applications/nfsserver/deploy/templates/serviceaccount.yaml
diff --git a/applications/nfsserver/deploy/templates/helm/storageclass.yaml b/applications/nfsserver/deploy/templates/storageclass.yaml
similarity index 100%
rename from applications/nfsserver/deploy/templates/helm/storageclass.yaml
rename to applications/nfsserver/deploy/templates/storageclass.yaml
diff --git a/applications/sentry/deploy/templates/helm/redis.yaml b/applications/sentry/deploy/templates/redis.yaml
similarity index 100%
rename from applications/sentry/deploy/templates/helm/redis.yaml
rename to applications/sentry/deploy/templates/redis.yaml
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 43bd8401..cdf8dddd 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -45,10 +45,13 @@ services:
- {{ .name }}={{ .value | quote }}
{{- end }}
{{- with $app_config.harness.dependencies.soft }}
- # links:
- # {{- range . }}
- # - {{ . }}
- # {{- end }}
+ links:
+ {{- range . }}
+ - {{ . }}
+ {{- with $app_config.harness.domain }}
+ :{{- . }}
+ {{- end }}
+ {{- end }}
{{- end }}
{{- with $app_config.harness.dependencies.hard }}
depends_on:
diff --git a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
index 9abcd565..2c2a2c35 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
@@ -525,7 +525,7 @@ def collect_apps_helm_templates(search_root, dest_helm_chart_path, templates_pat
app_name = app_name_from_path(os.path.relpath(f"{app_path}", app_base_path))
if app_name in exclude or (include and not any(inc in app_name for inc in include)):
continue
- template_dir = app_path / 'deploy' / 'templates' / templates_path
+ template_dir = app_path / 'deploy' / f'templates-{templates_path}'
if template_dir.exists():
dest_dir = dest_helm_chart_path / 'templates' / app_name
diff --git a/tools/deployment-cli-tools/ch_cli_tools/helm.py b/tools/deployment-cli-tools/ch_cli_tools/helm.py
index 9bd43b8c..64683197 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/helm.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/helm.py
@@ -521,7 +521,7 @@ def collect_apps_helm_templates(search_root, dest_helm_chart_path, exclude=(), i
app_name = app_name_from_path(os.path.relpath(app_path, app_base_path))
if app_name in exclude or (include and not any(inc in app_name for inc in include)):
continue
- template_dir = os.path.join(app_path, 'deploy', 'templates', HELM_PATH)
+ template_dir = os.path.join(app_path, 'deploy', 'templates')
if os.path.exists(template_dir):
dest_dir = os.path.join(
dest_helm_chart_path, 'templates', app_name)
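After this rollback, plain Helm templates live in each app's deploy/templates directory again, while engine-specific templates use a suffixed sibling such as deploy/templates-compose. A small sketch of the resolution rule (paths illustrative):

```python
from pathlib import Path

def template_dir(app_path: Path, engine: str | None = None) -> Path:
    """Helm reads deploy/templates; other engines read deploy/templates-<engine>."""
    base = app_path / "deploy"
    return base / "templates" if engine is None else base / f"templates-{engine}"

print(template_dir(Path("applications/events")))             # applications/events/deploy/templates
print(template_dir(Path("applications/events"), "compose"))  # applications/events/deploy/templates-compose
```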
From 1a570647182d3f89dd057cbe7a8be770fa39b52a Mon Sep 17 00:00:00 2001
From: aranega
Date: Fri, 9 Feb 2024 10:35:41 -0600
Subject: [PATCH 017/210] CH-100 Add subdomain configuration
---
deployment-configuration/compose/templates/auto-compose.yaml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index cdf8dddd..9ba6a3ef 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -48,8 +48,8 @@ services:
links:
{{- range . }}
- {{ . }}
- {{- with $app_config.harness.domain }}
- :{{- . }}
+ {{- with $app_config.harness.subdomain }}
+ {{- ":" }}{{ . }}.{{ $.Values.domain }}
{{- end }}
{{- end }}
{{- end }}
From b97c19c827e3dbd4a51ff2df273b0e63e810be1a Mon Sep 17 00:00:00 2001
From: aranega
Date: Fri, 9 Feb 2024 12:20:31 -0600
Subject: [PATCH 018/210] CH-100 Fix bad "links" generation
---
.../compose/templates/auto-compose.yaml | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 9ba6a3ef..b15d32cd 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -47,10 +47,7 @@ services:
{{- with $app_config.harness.dependencies.soft }}
links:
{{- range . }}
- - {{ . }}
- {{- with $app_config.harness.subdomain }}
- {{- ":" }}{{ . }}.{{ $.Values.domain }}
- {{- end }}
+ - {{ . }}:{{ . }}.{{ $.Values.domain }}
{{- end }}
{{- end }}
{{- with $app_config.harness.dependencies.hard }}
@@ -77,7 +74,7 @@ services:
{{- end }}
traefik:
- image: "traefik:v2.2"
+ image: "traefik:v2.10"
container_name: "traefik"
networks:
- ch
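Compose links entries take the form service:alias, so the fix maps every soft dependency to an alias under the deployment domain and the in-container hostname matches the public one. Schematically (domain and app names are examples):

```python
# Schematic rendering of the links block for two example soft dependencies.
domain = "cloudharness.local"               # $.Values.domain (example)
soft_dependencies = ["accounts", "events"]  # harness.dependencies.soft

links = [f"{dep}:{dep}.{domain}" for dep in soft_dependencies]
print(links)  # ['accounts:accounts.cloudharness.local', 'events:events.cloudharness.local']
```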
From 85dcfd93cb131bd29097479ee6bcdd08ee966dbc Mon Sep 17 00:00:00 2001
From: aranega
Date: Mon, 12 Feb 2024 12:54:00 -0600
Subject: [PATCH 019/210] CH-100 Add support for aliases and service links
---
.../compose/templates/auto-compose.yaml | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index b15d32cd..512ba0db 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -2,16 +2,22 @@ version: '3.7'
services:
{{- range $app_name, $app_config := .Values.apps }}
- {{- if has $app_name (list "argo" "nfsserver" "workflows" "events" ) }} {{- /* We deactivate generation for some services */}}
+ {{ $deployment := $app_config.harness.deployment }}
+ {{- if or (not $deployment.auto) (not $app_config.harness.service.auto) }}
{{- continue }}
{{- end}}
- {{ $deployment := $app_config.harness.deployment }}
{{ $app_name }}:
{{- with $app_config.domain }}
domainname: {{ . }}
{{- end }}
networks:
- - ch
+ {{- if ne $app_config.harness.service.name $app_name}}
+ ch:
+ aliases:
+ - {{ $app_config.harness.service.name }}
+ {{- else }}
+ - ch
+ {{- end}}
{{- with $app_config.image }}
image: {{ . }}
{{- end }}
@@ -47,7 +53,8 @@ services:
{{- with $app_config.harness.dependencies.soft }}
links:
{{- range . }}
- - {{ . }}:{{ . }}.{{ $.Values.domain }}
+ {{- $service_name := (get $.Values.apps .).harness.service.name }}
+ - {{ . }}:{{ $service_name }}.{{ $.Values.domain }}
{{- end }}
{{- end }}
{{- with $app_config.harness.dependencies.hard }}
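When an app's service name differs from its app key, the container now joins the ch network with an alias so both names resolve; otherwise the plain list form is kept. The same decision in Python:

```python
def networks_block(app_name: str, service_name: str):
    """Mirror the template: attach an alias only when the names differ."""
    if service_name != app_name:
        return {"ch": {"aliases": [service_name]}}
    return ["ch"]

print(networks_block("accounts", "accounts"))      # ['ch']
print(networks_block("workflows", "argo-server"))  # {'ch': {'aliases': ['argo-server']}} (example names)
```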
From 14292113f978415b7cff3a69e937785602449622 Mon Sep 17 00:00:00 2001
From: Filippo Ledda
Date: Tue, 13 Feb 2024 18:38:03 +0100
Subject: [PATCH 020/210] CH-118 update node version
---
.../base-images/cloudharness-frontend-build/Dockerfile | 2 +-
test/test-e2e/Dockerfile | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/infrastructure/base-images/cloudharness-frontend-build/Dockerfile b/infrastructure/base-images/cloudharness-frontend-build/Dockerfile
index ec4c5990..412125e6 100644
--- a/infrastructure/base-images/cloudharness-frontend-build/Dockerfile
+++ b/infrastructure/base-images/cloudharness-frontend-build/Dockerfile
@@ -1,3 +1,3 @@
-FROM node:15.5
+FROM node:20
diff --git a/test/test-e2e/Dockerfile b/test/test-e2e/Dockerfile
index 48c8f855..ef17adee 100644
--- a/test/test-e2e/Dockerfile
+++ b/test/test-e2e/Dockerfile
@@ -1,4 +1,4 @@
-FROM node:lts-slim
+FROM node:20
# Install latest chrome dev package and fonts to support major charsets (Chinese, Japanese, Arabic, Hebrew, Thai and a few others)
# Note: this installs the necessary libs to make the bundled version of Chromium that Puppeteer
From 0f81d23699d2a8d99597ae8275b0a317c6647d7e Mon Sep 17 00:00:00 2001
From: Filippo Ledda
Date: Tue, 13 Feb 2024 18:39:08 +0100
Subject: [PATCH 021/210] CH-118 clean unused constant
---
docs/dev.md | 1 -
libraries/cloudharness-utils/cloudharness_utils/constants.py | 4 ----
2 files changed, 5 deletions(-)
diff --git a/docs/dev.md b/docs/dev.md
index 113e0c86..996e6350 100644
--- a/docs/dev.md
+++ b/docs/dev.md
@@ -93,7 +93,6 @@ This file is part of the CloudHarness runtime.
Other constants are located there as shown in the following code extract.
```python
-NODE_BUILD_IMAGE = 'node:8.16.1-alpine'
APPLICATION_TEMPLATE_PATH = 'application-templates'
# ...
APPS_PATH = 'applications'
diff --git a/libraries/cloudharness-utils/cloudharness_utils/constants.py b/libraries/cloudharness-utils/cloudharness_utils/constants.py
index e2a6a48b..53282691 100644
--- a/libraries/cloudharness-utils/cloudharness_utils/constants.py
+++ b/libraries/cloudharness-utils/cloudharness_utils/constants.py
@@ -1,9 +1,5 @@
import os
-NODE_BUILD_IMAGE = 'node:8.16.1-alpine'
-
-
-
APPLICATION_TEMPLATE_PATH = 'application-templates'
DEFAULT_MERGE_PATH = ".overrides"
From 1ac82264e80fa1d19730d85036878ed4c75c6b81 Mon Sep 17 00:00:00 2001
From: aranega
Date: Wed, 14 Feb 2024 10:34:54 -0600
Subject: [PATCH 022/210] CH-100 Add initial support for auto databases
---
.../compose/templates/auto-compose.yaml | 23 ++++++++++++++++++-
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 512ba0db..80f4845b 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -78,8 +78,29 @@ services:
{{- end }}
{{- end}}
{{- end }}
+ {{- with $app_config.harness.database }}
+ {{- if not .auto }}
+ {{- continue}}
+ {{- end }}
+ # Database for {{ $app_name }}, type {{ .type }} named {{ .name }}
+ {{ .name }}:
+ {{- $db_infos := (get . .type) }}
+ image: {{ $db_infos.image }}
+ expose:
+ {{- range $port := $db_infos.ports }}
+ - {{ $port.port | quote }}
+ {{- end }}
+ {{- with .resources }}
+ resources:
+ limits:
+ cpus: {{ .limits.cpu | default "1000m" }}
+ memory: {{ trimSuffix "i" .limits.memory | default "2G" }}
+ reservations:
+ cpus: {{ .requests.cpu | default "100m" }}
+ memory: {{ trimSuffix "i" .requests.memory | default "512M" }}
+ {{- end }}
+ {{- end}}
{{- end }}
-
traefik:
image: "traefik:v2.10"
container_name: "traefik"
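The `get . .type` lookup pulls the engine-specific sub-block (image, ports) out of the database config, keyed by its own type field. In Python terms, with a config shape inferred from the fields the template dereferences:

```python
# Example database config mirroring the fields the template reads.
database = {
    "auto": True,
    "name": "myapp-db",            # example name
    "type": "postgres",
    "postgres": {                  # engine-specific block, keyed by `type`
        "image": "postgres:13",    # example image
        "ports": [{"name": "postgres", "port": 5432}],
    },
}

db_infos = database[database["type"]]          # what `get . .type` does
print(db_infos["image"])                       # postgres:13
print([p["port"] for p in db_infos["ports"]])  # [5432]
```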
From 56e22b7b883a57cc0eb9c672c5b00f43cc8172a0 Mon Sep 17 00:00:00 2001
From: aranega
Date: Wed, 14 Feb 2024 10:41:32 -0600
Subject: [PATCH 023/210] CH-100 Add finer-grained port handling
---
.../compose/templates/auto-compose.yaml | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 80f4845b..c432d522 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -24,6 +24,10 @@ services:
{{- with $app_config.harness.service.port }}
ports:
- "{{ . }}:{{ $app_config.harness.deployment.port }}"
+ {{- end }}
+ {{- with $app_config.harness.deployment.port }}
+ expose:
+ - {{ . | quote }}
{{- end}}
deploy:
mode: "replicated"
@@ -85,6 +89,8 @@ services:
# Database for {{ $app_name }}, type {{ .type }} named {{ .name }}
{{ .name }}:
{{- $db_infos := (get . .type) }}
+ networks:
+ ch:
image: {{ $db_infos.image }}
expose:
{{- range $port := $db_infos.ports }}
From 336b5585f7fa059b282cede4fcedd6023bcf563a Mon Sep 17 00:00:00 2001
From: aranega
Date: Thu, 15 Feb 2024 08:11:42 -0600
Subject: [PATCH 024/210] CH-100 Change how the port is exposed to the outside world
---
.../compose/templates/auto-compose.yaml | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index c432d522..894814f6 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -97,13 +97,14 @@ services:
- {{ $port.port | quote }}
{{- end }}
{{- with .resources }}
- resources:
- limits:
- cpus: {{ .limits.cpu | default "1000m" }}
- memory: {{ trimSuffix "i" .limits.memory | default "2G" }}
- reservations:
- cpus: {{ .requests.cpu | default "100m" }}
- memory: {{ trimSuffix "i" .requests.memory | default "512M" }}
+ deploy:
+ resources:
+ limits:
+ cpus: {{ .limits.cpu | default "1000m" }}
+ memory: {{ trimSuffix "i" .limits.memory | default "2G" }}
+ reservations:
+ cpus: {{ .requests.cpu | default "100m" }}
+ memory: {{ trimSuffix "i" .requests.memory | default "512M" }}
{{- end }}
{{- end}}
{{- end }}
From a004ffb80583dee5be24181789e8ca13cc8f508a Mon Sep 17 00:00:00 2001
From: aranega
Date: Thu, 15 Feb 2024 11:02:12 -0600
Subject: [PATCH 025/210] CH-100 Fix issue with env var quoting
---
.../compose/templates/auto-compose.yaml | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 894814f6..138826e3 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -21,10 +21,12 @@ services:
{{- with $app_config.image }}
image: {{ . }}
{{- end }}
+ {{- if eq $.Values.mainapp $app_name }}
{{- with $app_config.harness.service.port }}
ports:
- "{{ . }}:{{ $app_config.harness.deployment.port }}"
{{- end }}
+ {{- end }}
{{- with $app_config.harness.deployment.port }}
expose:
- {{ . | quote }}
@@ -40,19 +42,20 @@ services:
cpus: {{ $deployment.resources.requests.cpu | default "25m" }}
memory: {{ trimSuffix "i" $deployment.resources.requests.memory | default "32M" }}
{{- with $deployment.command }}
- entrypoint: {{ cat . $deployment.args }}
+ # entrypoint: {{ cat . $deployment.args }}
{{- end }}
environment:
- - CH_CURRENT_APP_NAME={{ $app_name | quote }}
+ - CH_CURRENT_APP_NAME={{ $app_name }}
+ - CH_VALUES_PATH=/opt/cloudharness/resources/allvalues.yaml
{{- range $.Values.env }}
- - {{ .name }}={{ .value | quote }}
+ - {{ .name }}={{ .value }}
{{- end }}
{{- /*{{- range $.Values.env }}
- - {{ .name }}={{ .value | quote }}
+ - {{ .name }}={{ .value }}
{{- end }} */}}
{{- range $app_config.harness.env }}
- - {{ .name }}={{ .value | quote }}
+ - {{ .name }}={{ .value }}
{{- end }}
{{- with $app_config.harness.dependencies.soft }}
links:
@@ -67,8 +70,9 @@ services:
- {{ . }}
{{- end }}
{{- end }}
- {{- if or $deployment.volume $app_config.harness.resources }}
volumes:
+ - ./compose/values.yaml:/opt/cloudharness/resources/allvalues.yaml:ro
+ {{- if or $deployment.volume $app_config.harness.resources }}
{{- with $deployment.volume }}
- type: volume
source: {{ .name }}
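The quote filters are dropped because compose's list-form environment entries are split on the first '=' and everything after it is taken literally, so a rendered NAME="value" would hand the application a value that still contains the quote characters. A quick check:

```python
# List-form compose environment entries keep everything after the first '='
# verbatim, including any quote characters the template would have added.
def parse_env_entry(entry: str) -> tuple[str, str]:
    name, _, value = entry.partition("=")
    return name, value

print(parse_env_entry('CH_CURRENT_APP_NAME=accounts'))    # ('CH_CURRENT_APP_NAME', 'accounts')
print(parse_env_entry('CH_CURRENT_APP_NAME="accounts"'))  # ('CH_CURRENT_APP_NAME', '"accounts"')
```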
From f73108c2fa976d7654c76798cde9f2697e2ac23c Mon Sep 17 00:00:00 2001
From: aranega
Date: Thu, 15 Feb 2024 11:20:41 -0600
Subject: [PATCH 026/210] CH-100 Add special behavior to produce allvalues.yaml
---
.../compose/templates/auto-compose.yaml | 2 +-
.../ch_cli_tools/dockercompose.py | 19 +++++++++++++++++++
2 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 138826e3..3896ae13 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -71,7 +71,7 @@ services:
{{- end }}
{{- end }}
volumes:
- - ./compose/values.yaml:/opt/cloudharness/resources/allvalues.yaml:ro
+ - ./compose/allvalues.yaml:/opt/cloudharness/resources/allvalues.yaml:ro
{{- if or $deployment.volume $app_config.harness.resources }}
{{- with $deployment.volume }}
- type: volume
diff --git a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
index 2c2a2c35..1c51abd6 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
@@ -11,6 +11,7 @@
import tarfile
from docker import from_env as DockerClient
from pathlib import Path
+import copy
from . import HERE, CH_ROOT
@@ -134,6 +135,12 @@ def process_values(self) -> HarnessMainConfig:
merge_to_yaml_file({'metadata': {'namespace': self.namespace},
'name': helm_values['name']}, self.helm_chart_path)
validate_helm_values(merged_values)
+
+ # All values save
+ all_values = self.__get_default_helm_values_with_secrets(merged_values)
+
+ merge_to_yaml_file(all_values, self.dest_deployment_path / 'allvalues.yaml')
+
return HarnessMainConfig.from_dict(merged_values)
def __process_applications(self, helm_values, base_image_name):
@@ -234,6 +241,18 @@ def __get_default_helm_values(self):
return helm_values
+ def __get_default_helm_values_with_secrets(self, helm_values):
+ helm_values = copy.deepcopy(helm_values)
+ # {{- $values_copy := deepCopy .Values }}
+ # {{- range $key, $val := .Values.apps }}
+ # {{- $new_secrets := dict "apps" (dict $key (dict "harness" (dict "secrets"))) }}
+ # {{- $tmp := mergeOverwrite $values_copy $new_secrets }}
+ # {{- end }}
+ # {{ $values_copy | toYaml | indent 4 }}
+ for key, val in helm_values['apps'].items():
+ helm_values['apps'][key]['harness']['secrets'] = {}
+ return helm_values
+
def create_tls_certificate(self, helm_values):
if not self.tls:
helm_values['tls'] = None
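Because the dump is taken from a deep copy, blanking each app's secrets block cannot mutate the values used for the actual deployment. The same idea, condensed:

```python
import copy

def values_without_secrets(helm_values: dict) -> dict:
    """Deep-copy the merged values and blank every app's secrets block."""
    safe = copy.deepcopy(helm_values)
    for app in safe.get("apps", {}).values():
        app.setdefault("harness", {})["secrets"] = {}
    return safe

values = {"apps": {"accounts": {"harness": {"secrets": {"api_key": "s3cret"}}}}}
print(values_without_secrets(values)["apps"]["accounts"]["harness"]["secrets"])  # {}
print(values["apps"]["accounts"]["harness"]["secrets"])  # original left intact
```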
From 2370b20c646e2cb4c2c87221e746d23ab1ad5ec3 Mon Sep 17 00:00:00 2001
From: aranega
Date: Thu, 15 Feb 2024 12:08:01 -0600
Subject: [PATCH 027/210] CH-100 Add actual docker-compose.yaml generation in
harness-deployment
---
.../compose/templates/auto-compose.yaml | 1 -
.../ch_cli_tools/dockercompose.py | 12 ++++++++++++
2 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 3896ae13..4999b46f 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -46,7 +46,6 @@ services:
{{- end }}
environment:
- CH_CURRENT_APP_NAME={{ $app_name }}
- - CH_VALUES_PATH=/opt/cloudharness/resources/allvalues.yaml
{{- range $.Values.env }}
- {{ .name }}={{ .value }}
diff --git a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
index 1c51abd6..dfe0bf5b 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
@@ -141,8 +141,20 @@ def process_values(self) -> HarnessMainConfig:
merge_to_yaml_file(all_values, self.dest_deployment_path / 'allvalues.yaml')
+ self.generate_docker_compose_yaml()
+
return HarnessMainConfig.from_dict(merged_values)
+ def generate_docker_compose_yaml(self):
+ compose_templates = self.dest_deployment_path
+ dest_compose_yaml = self.dest_deployment_path.parent / "docker-compose.yaml"
+
+ logging.info(f'Generate docker compose configuration in: {dest_compose_yaml}, using templates from {compose_templates}')
+ command = f"helm template {compose_templates} > {dest_compose_yaml}"
+
+ subprocess.call(command, shell=True)
+
+
def __process_applications(self, helm_values, base_image_name):
for root_path in self.root_paths:
app_values = init_app_values(
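The generation step treats the compose templates like any other Helm chart: helm template renders them and the output is redirected to docker-compose.yaml. An equivalent sketch without the shell redirect, assuming helm is on PATH (paths are examples):

```python
import subprocess
from pathlib import Path

def render_compose(chart_dir: Path, dest: Path) -> None:
    """Render the compose templates with `helm template` and write the result."""
    result = subprocess.run(
        ["helm", "template", str(chart_dir)],
        check=True, capture_output=True, text=True,
    )
    dest.write_text(result.stdout)

# render_compose(Path("deployment/compose"), Path("deployment/docker-compose.yaml"))
```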
From 5ed630d8dcb9a12dbd7238dad51ad6ed03e3bbcd Mon Sep 17 00:00:00 2001
From: aranega
Date: Thu, 15 Feb 2024 12:40:31 -0600
Subject: [PATCH 028/210] CH-100 Add first dedicated templates for postgres
---
.../compose/templates/auto-compose.yaml | 7 ++++---
.../compose/templates/auto-database-postgres.yaml | 7 +++++++
2 files changed, 11 insertions(+), 3 deletions(-)
create mode 100644 deployment-configuration/compose/templates/auto-database-postgres.yaml
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 4999b46f..22f8c5a2 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -86,9 +86,9 @@ services:
{{- end}}
{{- end }}
{{- with $app_config.harness.database }}
- {{- if not .auto }}
- {{- continue}}
- {{- end }}
+ {{- if not .auto }}
+ {{- continue}}
+ {{- end }}
# Database for {{ $app_name }}, type {{ .type }} named {{ .name }}
{{ .name }}:
{{- $db_infos := (get . .type) }}
@@ -109,6 +109,7 @@ services:
cpus: {{ .requests.cpu | default "100m" }}
memory: {{ trimSuffix "i" .requests.memory | default "512M" }}
{{- end }}
+ {{- include "deploy_utils.database.postgres" . }}
{{- end}}
{{- end }}
traefik:
diff --git a/deployment-configuration/compose/templates/auto-database-postgres.yaml b/deployment-configuration/compose/templates/auto-database-postgres.yaml
new file mode 100644
index 00000000..d832193f
--- /dev/null
+++ b/deployment-configuration/compose/templates/auto-database-postgres.yaml
@@ -0,0 +1,7 @@
+{{- define "deploy_utils.database.postgres" }}
+ environment:
+ - POSTGRES_DB={{ .postgres.initialdb | quote }}
+ - POSTGRES_USER={{ .user | quote }}
+ - POSTGRES_PASSWORD={{ .pass | quote }}
+ - PGDATA=/data/db/pgdata
+{{- end }}
\ No newline at end of file
From 111a4f3e5be5d7655faa783009106a0dafebfd6f Mon Sep 17 00:00:00 2001
From: aranega
Date: Thu, 15 Feb 2024 13:09:12 -0600
Subject: [PATCH 029/210] CH-100 Add volumes for db
---
.../compose/templates/auto-compose.yaml | 17 ++++++++++++++++-
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 22f8c5a2..f14c7e11 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -109,7 +109,16 @@ services:
cpus: {{ .requests.cpu | default "100m" }}
memory: {{ trimSuffix "i" .requests.memory | default "512M" }}
{{- end }}
- {{- include "deploy_utils.database.postgres" . }}
+ volumes:
+ - type: volume
+ source: {{ .name }}
+ target: /data/db
+ {{- if eq .type "postgres" }}
+ - type: volume
+ source: dshm
+ target: /dev/shm
+ {{- include "deploy_utils.database.postgres" . }}
+ {{- end }}
{{- end}}
{{- end }}
traefik:
@@ -142,4 +151,10 @@ volumes: # this inclusion needs to be conditional
{{- with $app_config.harness.deployment.volume }}
{{ .name }}:
{{- end }}
+ {{- with $app_config.harness.database }}
+ {{ .name }}:
+ {{- if eq .type "postgres" }}
+ dshm:
+ {{- end }}
+ {{- end }}
{{- end }}
\ No newline at end of file
From ce4596ab1b62110c56c0bb1ef339cf9cc7cbdc91 Mon Sep 17 00:00:00 2001
From: aranega
Date: Fri, 16 Feb 2024 06:46:34 -0600
Subject: [PATCH 030/210] CH-100 Add first template for events
---
.../templates-compose/events-deployment.yaml | 13 +++++
.../compose/templates/auto-compose.yaml | 47 +++++++++++--------
2 files changed, 40 insertions(+), 20 deletions(-)
create mode 100644 applications/events/deploy/templates-compose/events-deployment.yaml
diff --git a/applications/events/deploy/templates-compose/events-deployment.yaml b/applications/events/deploy/templates-compose/events-deployment.yaml
new file mode 100644
index 00000000..f16e7e38
--- /dev/null
+++ b/applications/events/deploy/templates-compose/events-deployment.yaml
@@ -0,0 +1,13 @@
+{{- define "events.deployment" }}
+{{- $nfs := .apps.nfsserver}}
+
+{{ $nfs.name }}:
+ image: {{ $nfs.harness.deployment.image }}
+ environment:
+ # NFS useDNS? {{ $nfs.nfs.useDNS }}
+ {{- if $nfs.nfs.useDNS }}
+ - NFS_SERVER={{ printf "nfs-server.%s.svc.cluster.local" .namespace }}
+ {{- end }}
+ - NFS_PATH={{ $nfs.nfs.path }}
+ - PROVISIONER_NAME={{ printf "%s-nfs-provisioner" .namespace }}
+{{- end }}
\ No newline at end of file
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index f14c7e11..b51aa02e 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -1,8 +1,32 @@
version: '3.7'
services:
+ traefik:
+ image: "traefik:v2.10"
+ container_name: "traefik"
+ networks:
+ - ch
+ command:
+ - "--log.level=INFO"
+ - "--api.insecure=true"
+ - "--providers.docker=true"
+ - "--providers.docker.exposedbydefault=false"
+ - "--entrypoints.web.address=:80"
+ - "--entrypoints.websecure.address=:443"
+ - "--providers.file.directory=/etc/traefik/dynamic_conf"
+ ports:
+ - "80:80"
+ - "443:443"
+ volumes:
+ - "/var/run/docker.sock:/var/run/docker.sock:ro"
+ - "./certs/:/certs/:ro"
+ - "./traefik.yaml:/etc/traefik/dynamic_conf/conf.yml:ro"
+
{{- range $app_name, $app_config := .Values.apps }}
{{ $deployment := $app_config.harness.deployment }}
+ {{- if eq $app_name "nfsserver" }}
+ {{- include "events.deployment" $.Values}}
+ {{- end }}
{{- if or (not $deployment.auto) (not $app_config.harness.service.auto) }}
{{- continue }}
{{- end}}
@@ -120,28 +144,11 @@ services:
{{- include "deploy_utils.database.postgres" . }}
{{- end }}
{{- end}}
+
{{- end }}
- traefik:
- image: "traefik:v2.10"
- container_name: "traefik"
- networks:
- - ch
- command:
- - "--log.level=INFO"
- - "--api.insecure=true"
- - "--providers.docker=true"
- - "--providers.docker.exposedbydefault=false"
- - "--entrypoints.web.address=:80"
- - "--entrypoints.websecure.address=:443"
- - "--providers.file.directory=/etc/traefik/dynamic_conf"
- ports:
- - "80:80"
- - "443:443"
- volumes:
- - "/var/run/docker.sock:/var/run/docker.sock:ro"
- - "./certs/:/certs/:ro"
- - "./traefik.yaml:/etc/traefik/dynamic_conf/conf.yml:ro"
+
+# Network definition
networks:
ch:
name: ch_network
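The included partial (still registered as "events.deployment" at this point, although it emits the nfsserver service; a later patch renames the file) derives the provisioner environment from the namespace and only sets NFS_SERVER when useDNS is enabled. The same logic in Python (example values):

```python
def nfs_env(namespace: str, nfs: dict) -> list[str]:
    """Mirror the partial: NFS_SERVER is emitted only when useDNS is set."""
    env = []
    if nfs.get("useDNS"):
        env.append(f"NFS_SERVER=nfs-server.{namespace}.svc.cluster.local")
    env.append(f"NFS_PATH={nfs['path']}")
    env.append(f"PROVISIONER_NAME={namespace}-nfs-provisioner")
    return env

print(nfs_env("ch", {"useDNS": True, "path": "/exports"}))  # example namespace and path
```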
From 3868b56b476c97d0b3f2d84ea44c96f12339341a Mon Sep 17 00:00:00 2001
From: aranega
Date: Fri, 16 Feb 2024 07:04:48 -0600
Subject: [PATCH 031/210] CH-100 Refactor templates for the db
---
.../compose/templates/auto-compose.yaml | 35 ++-----------------
.../templates/auto-database-postgres.yaml | 10 +++---
.../compose/templates/auto-database.yaml | 32 +++++++++++++++++
3 files changed, 39 insertions(+), 38 deletions(-)
create mode 100644 deployment-configuration/compose/templates/auto-database.yaml
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index b51aa02e..0ac27f32 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -25,7 +25,7 @@ services:
{{- range $app_name, $app_config := .Values.apps }}
{{ $deployment := $app_config.harness.deployment }}
{{- if eq $app_name "nfsserver" }}
- {{- include "events.deployment" $.Values}}
+ {{- include "events.deployment" $.Values | indent 2 }}
{{- end }}
{{- if or (not $deployment.auto) (not $app_config.harness.service.auto) }}
{{- continue }}
@@ -113,41 +113,10 @@ services:
{{- if not .auto }}
{{- continue}}
{{- end }}
- # Database for {{ $app_name }}, type {{ .type }} named {{ .name }}
- {{ .name }}:
- {{- $db_infos := (get . .type) }}
- networks:
- ch:
- image: {{ $db_infos.image }}
- expose:
- {{- range $port := $db_infos.ports }}
- - {{ $port.port | quote }}
- {{- end }}
- {{- with .resources }}
- deploy:
- resources:
- limits:
- cpus: {{ .limits.cpu | default "1000m" }}
- memory: {{ trimSuffix "i" .limits.memory | default "2G" }}
- reservations:
- cpus: {{ .requests.cpu | default "100m" }}
- memory: {{ trimSuffix "i" .requests.memory | default "512M" }}
- {{- end }}
- volumes:
- - type: volume
- source: {{ .name }}
- target: /data/db
- {{- if eq .type "postgres" }}
- - type: volume
- source: dshm
- target: /dev/shm
- {{- include "deploy_utils.database.postgres" . }}
- {{- end }}
+ {{- include "db.deployment" . | indent 2}}
{{- end}}
-
{{- end }}
-
# Network definition
networks:
ch:
diff --git a/deployment-configuration/compose/templates/auto-database-postgres.yaml b/deployment-configuration/compose/templates/auto-database-postgres.yaml
index d832193f..b6db420c 100644
--- a/deployment-configuration/compose/templates/auto-database-postgres.yaml
+++ b/deployment-configuration/compose/templates/auto-database-postgres.yaml
@@ -1,7 +1,7 @@
{{- define "deploy_utils.database.postgres" }}
- environment:
- - POSTGRES_DB={{ .postgres.initialdb | quote }}
- - POSTGRES_USER={{ .user | quote }}
- - POSTGRES_PASSWORD={{ .pass | quote }}
- - PGDATA=/data/db/pgdata
+ environment:
+ - POSTGRES_DB={{ .postgres.initialdb }}
+ - POSTGRES_USER={{ .user }}
+ - POSTGRES_PASSWORD={{ .pass }}
+ - PGDATA=/data/db/pgdata
{{- end }}
\ No newline at end of file
diff --git a/deployment-configuration/compose/templates/auto-database.yaml b/deployment-configuration/compose/templates/auto-database.yaml
new file mode 100644
index 00000000..70bda63a
--- /dev/null
+++ b/deployment-configuration/compose/templates/auto-database.yaml
@@ -0,0 +1,32 @@
+{{- define "db.deployment" }}
+# Database type {{ .type }} named {{ .name }}
+{{ .name }}:
+ {{- $db_infos := (get . .type) }}
+ networks:
+ ch:
+ image: {{ $db_infos.image }}
+ expose:
+ {{- range $port := $db_infos.ports }}
+ - {{ $port.port | quote }}
+ {{- end }}
+ {{- with .resources }}
+ deploy:
+ resources:
+ limits:
+ cpus: {{ .limits.cpu | default "1000m" }}
+ memory: {{ trimSuffix "i" .limits.memory | default "2G" }}
+ reservations:
+ cpus: {{ .requests.cpu | default "100m" }}
+ memory: {{ trimSuffix "i" .requests.memory | default "512M" }}
+ {{- end }}
+ volumes:
+ - type: volume
+ source: {{ .name }}
+ target: /data/db
+ {{- if eq .type "postgres" }}
+ - type: volume
+ source: dshm
+ target: /dev/shm
+ {{- include "deploy_utils.database.postgres" . }}
+ {{- end }}
+{{- end }}
\ No newline at end of file
From a0476d6cd29401a486e96d8654490e784bd0bcac Mon Sep 17 00:00:00 2001
From: aranega
Date: Fri, 16 Feb 2024 07:13:09 -0600
Subject: [PATCH 032/210] CH-100 Add /etc/hosts generation
---
.../compose/templates/auto-compose.yaml | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 0ac27f32..04db3327 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -104,11 +104,22 @@ services:
{{- with $app_config.harness.resources }}
{{- range .}}
- type: bind
- source: compose/resources/{{ $app_name }}/{{.src }}
+ source: compose/resources/{{ $app_name }}/{{ .src }}
target: {{ .dst }}
{{- end }}
{{- end}}
{{- end }}
+ {{- if $.Values.local }}
+ {{- $domain := $.Values.domain }}
+ {{- $ip := $.Values.localIp }}
+ extra_hosts:
+ - "{{ $.Values.domain }}:{{ $ip }}"
+ {{- range $app := $.Values.apps }}
+ {{- with $app.harness.subdomain}}
+ - "{{ . }}.{{ $domain }}:{{ $ip }}"
+ {{- end }}
+ {{- end }}
+ {{- end }}
{{- with $app_config.harness.database }}
{{- if not .auto }}
{{- continue}}
From bd3e423a08b34ece388d2cf175b9b6b270d4747a Mon Sep 17 00:00:00 2001
From: aranega
Date: Fri, 16 Feb 2024 07:27:20 -0600
Subject: [PATCH 033/210] CH-100 Add specific env vars by container
---
.../compose/templates/auto-compose.yaml | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 04db3327..e05934b9 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -74,9 +74,13 @@ services:
{{- range $.Values.env }}
- {{ .name }}={{ .value }}
{{- end }}
- {{- /*{{- range $.Values.env }}
- - {{ .name }}={{ .value }}
- {{- end }} */}}
+ {{- with $.Values.apps.accounts }}
+ - CH_ACCOUNTS_CLIENT_SECRET={{ .client.secret }}
+ - CH_ACCOUNTS_REALM={{ $.Values.namespace }}
+ - CH_ACCOUNTS_AUTH_DOMAIN={{ printf "%s.%s" .harness.subdomain $.Values.domain }}
+ - CH_ACCOUNTS_CLIENT_ID={{ .client.id }}
+ - DOMAIN={{ $.Values.domain }}
+ {{- end}}
{{- range $app_config.harness.env }}
- {{ .name }}={{ .value }}
{{- end }}
@@ -110,6 +114,7 @@ services:
{{- end}}
{{- end }}
{{- if $.Values.local }}
+ # Extra /etc/hosts list
{{- $domain := $.Values.domain }}
{{- $ip := $.Values.localIp }}
extra_hosts:
From 8851c48ef3777aef08144bcda1e309680cc878ca Mon Sep 17 00:00:00 2001
From: aranega
Date: Tue, 20 Feb 2024 11:06:17 -0600
Subject: [PATCH 034/210] CH-100 Rename some templates
---
.../nfsserver-deployment.yaml} | 5 +-
.../compose/templates/auto-compose.yaml | 24 +++++-----
.../ch_cli_tools/dockercompose.py | 46 ++++++++++++++++++-
3 files changed, 60 insertions(+), 15 deletions(-)
rename applications/{events/deploy/templates-compose/events-deployment.yaml => nfsserver/deploy/templates-compose/nfsserver-deployment.yaml} (74%)
diff --git a/applications/events/deploy/templates-compose/events-deployment.yaml b/applications/nfsserver/deploy/templates-compose/nfsserver-deployment.yaml
similarity index 74%
rename from applications/events/deploy/templates-compose/events-deployment.yaml
rename to applications/nfsserver/deploy/templates-compose/nfsserver-deployment.yaml
index f16e7e38..2dd8509b 100644
--- a/applications/events/deploy/templates-compose/events-deployment.yaml
+++ b/applications/nfsserver/deploy/templates-compose/nfsserver-deployment.yaml
@@ -1,7 +1,10 @@
-{{- define "events.deployment" }}
+{{- define "nfsserver.deployment" }}
{{- $nfs := .apps.nfsserver}}
{{ $nfs.name }}:
+ build:
+ context: {{ $nfs.build.context }}
+ dockerfile: {{ $nfs.build.dockerfile }}
image: {{ $nfs.harness.deployment.image }}
environment:
# NFS useDNS? {{ $nfs.nfs.useDNS }}
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index e05934b9..f174d943 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -25,7 +25,7 @@ services:
{{- range $app_name, $app_config := .Values.apps }}
{{ $deployment := $app_config.harness.deployment }}
{{- if eq $app_name "nfsserver" }}
- {{- include "events.deployment" $.Values | indent 2 }}
+ {{- include "nfsserver.deployment" $.Values | indent 2 }}
{{- end }}
{{- if or (not $deployment.auto) (not $app_config.harness.service.auto) }}
{{- continue }}
@@ -114,16 +114,16 @@ services:
{{- end}}
{{- end }}
{{- if $.Values.local }}
- # Extra /etc/hosts list
- {{- $domain := $.Values.domain }}
- {{- $ip := $.Values.localIp }}
- extra_hosts:
- - "{{ $.Values.domain }}:{{ $ip }}"
- {{- range $app := $.Values.apps }}
- {{- with $app.harness.subdomain}}
- - "{{ . }}.{{ $domain }}:{{ $ip }}"
- {{- end }}
- {{- end }}
+ # Extra /etc/hosts list
+ {{- $domain := $.Values.domain }}
+ {{- $ip := $.Values.localIp }}
+ extra_hosts:
+ - "{{ $.Values.domain }}={{ $ip }}"
+ {{- range $app := $.Values.apps }}
+ {{- with $app.harness.subdomain}}
+ - "{{ . }}.{{ $domain }}={{ $ip }}"
+ {{- end }}
+ {{- end }}
{{- end }}
{{- with $app_config.harness.database }}
{{- if not .auto }}
@@ -149,4 +149,4 @@ volumes: # this inclusion needs to be conditional
dshm:
{{- end }}
{{- end }}
-{{- end }}
\ No newline at end of file
+{{- end }}
diff --git a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
index dfe0bf5b..6563eac0 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
@@ -29,6 +29,7 @@
KEY_DEPLOYMENT = 'deployment'
KEY_APPS = 'apps'
KEY_TASK_IMAGES = 'task-images'
+# KEY_TASK_IMAGES_BUILD = f"{KEY_TASK_IMAGES}-build"
KEY_TEST_IMAGES = 'test-images'
DEFAULT_IGNORE = ('/tasks', '.dockerignore', '.hypothesis', "__pycache__", '.node_modules', 'dist', 'build', '.coverage')
@@ -180,6 +181,30 @@ def collect_app_values(self, app_base_path, base_image_name=None):
app_values = self.create_app_values_spec(app_name, app_path, base_image_name=base_image_name)
+ # dockerfile_path = next(app_path.rglob('**/Dockerfile'), None)
+ # # for dockerfile_path in app_path.rglob('**/Dockerfile'):
+ # # parent_name = dockerfile_path.parent.name.replace("-", "_")
+ # # if parent_name == app_key:
+ # # app_values['build'] = {
+ # # # 'dockerfile': f"{dockerfile_path.relative_to(app_path)}",
+ # # 'dockerfile': "Dockerfile",
+ # # 'context': os.path.relpath(dockerfile_path.parent, self.dest_deployment_path.parent),
+ # # }
+ # # elif "tasks/" in f"{dockerfile_path}":
+ # # parent_name = parent_name.upper()
+ # # values.setdefault("task-images-build", {})[parent_name] = {
+ # # 'dockerfile': "Dockerfile",
+ # # 'context': os.path.relpath(dockerfile_path.parent, self.dest_deployment_path.parent),
+ # # }
+ # # import ipdb; ipdb.set_trace() # fmt: skip
+
+ # if dockerfile_path:
+ # app_values['build'] = {
+ # # 'dockerfile': f"{dockerfile_path.relative_to(app_path)}",
+ # 'dockerfile': "Dockerfile",
+ # 'context': os.path.relpath(dockerfile_path.parent, self.dest_deployment_path.parent),
+ # }
+
values[app_key] = dict_merge(
values[app_key], app_values) if app_key in values else app_values
@@ -201,10 +226,15 @@ def __assign_static_build_dependencies(self, helm_values):
for dep in dependencies:
if dep in self.base_images and dep not in helm_values[KEY_TASK_IMAGES]:
helm_values[KEY_TASK_IMAGES][dep] = self.base_images[dep]
+ # helm_values.setdefault(KEY_TASK_IMAGES_BUILD, {})[dep] = {
+ # 'context': os.path.relpath(static_img_dockerfile, self.dest_deployment_path.parent),
+ # 'dockerfile': 'Dockerfile',
+ # }
for image_name in helm_values[KEY_TASK_IMAGES].keys():
if image_name in self.exclude:
del helm_values[KEY_TASK_IMAGES][image_name]
+ # del helm_values[KEY_TASK_IMAGES_BUILD][image_name]
def __init_base_images(self, base_image_name):
@@ -501,8 +531,20 @@ def create_app_values_spec(self, app_name, app_path, base_image_name=None):
task_path, app_path.parent))
img_name = image_name_from_dockerfile_path(task_name, base_image_name)
- values[KEY_TASK_IMAGES][task_name] = self.image_tag(
- img_name, build_context_path=task_path, dependencies=values[KEY_TASK_IMAGES].keys())
+ # import ipdb; ipdb.set_trace() # fmt: skip
+
+ # values[KEY_TASK_IMAGES][task_name] = self.image_tag(
+ # img_name, build_context_path=task_path, dependencies=values[KEY_TASK_IMAGES].keys())
+ # values.setdefault(KEY_TASK_IMAGES_BUILD, {})[task_name] = {
+ # 'context': os.path.relpath(task_path, self.dest_deployment_path.parent),
+ # 'dockerfile': 'Dockerfile',
+ # }
+
+ values[KEY_TASK_IMAGES][task_name] = {
+ 'name': self.image_tag(img_name, build_context_path=task_path, dependencies=values[KEY_TASK_IMAGES].keys()),
+ # 'context': os.path.relpath(task_path, self.dest_deployment_path.parent),
+ # 'dockerfile': 'Dockerfile',
+ }
return values
From ced7cd2b189c2e40ab3f77e95fb3437eb5fa909a Mon Sep 17 00:00:00 2001
From: aranega
Date: Tue, 20 Feb 2024 11:28:00 -0600
Subject: [PATCH 035/210] CH-100 Add first traefik configuration
---
.../templates-compose/nfsserver-deployment.yaml | 3 ---
.../compose/templates/auto-compose.yaml | 9 +++++++++
.../ch_cli_tools/dockercompose.py | 12 +++++++-----
3 files changed, 16 insertions(+), 8 deletions(-)
diff --git a/applications/nfsserver/deploy/templates-compose/nfsserver-deployment.yaml b/applications/nfsserver/deploy/templates-compose/nfsserver-deployment.yaml
index 2dd8509b..7e9b6819 100644
--- a/applications/nfsserver/deploy/templates-compose/nfsserver-deployment.yaml
+++ b/applications/nfsserver/deploy/templates-compose/nfsserver-deployment.yaml
@@ -2,9 +2,6 @@
{{- $nfs := .apps.nfsserver}}
{{ $nfs.name }}:
- build:
- context: {{ $nfs.build.context }}
- dockerfile: {{ $nfs.build.dockerfile }}
image: {{ $nfs.harness.deployment.image }}
environment:
# NFS useDNS? {{ $nfs.nfs.useDNS }}
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index f174d943..11996c91 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -125,6 +125,15 @@ services:
{{- end }}
{{- end }}
{{- end }}
+ labels:
+ - "traefik.enable=true"
+ {{- with $app_config.harness.service.port }}
+ - "traefik.http.services.{{ $app_name }}.loadbalancer.server.port={{ . }}"
+ {{- end }}
+ # - "traefik.http.middlewares.redirect-middleware.redirectscheme.scheme=https"
+ # - "traefik.http.routers.{{ .app_name }}.middlewares=redirect-middleware"
+ - "traefik.http.routers.{{ $app_name }}.rule=Host(`{{ $app_config.harness.subdomain }}.{{ $.Values.domain }}`)"
+ - "traefik.http.routers.{{ $app_name }}.entrypoints=web"
{{- with $app_config.harness.database }}
{{- if not .auto }}
{{- continue}}
diff --git a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
index 6563eac0..83e04024 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
@@ -540,11 +540,13 @@ def create_app_values_spec(self, app_name, app_path, base_image_name=None):
# 'dockerfile': 'Dockerfile',
# }
- values[KEY_TASK_IMAGES][task_name] = {
- 'name': self.image_tag(img_name, build_context_path=task_path, dependencies=values[KEY_TASK_IMAGES].keys()),
- # 'context': os.path.relpath(task_path, self.dest_deployment_path.parent),
- # 'dockerfile': 'Dockerfile',
- }
+ # values[KEY_TASK_IMAGES][task_name] = {
+ # 'name': self.image_tag(img_name, build_context_path=task_path, dependencies=values[KEY_TASK_IMAGES].keys()),
+ # # 'context': os.path.relpath(task_path, self.dest_deployment_path.parent),
+ # # 'dockerfile': 'Dockerfile',
+ # }
+
+ values[KEY_TASK_IMAGES][task_name] = self.image_tag(img_name, build_context_path=task_path, dependencies=values[KEY_TASK_IMAGES].keys())
return values
From f7ebeb03606302e9ffc56bf010ad49ad36cf428b Mon Sep 17 00:00:00 2001
From: aranega
Date: Tue, 20 Feb 2024 11:40:27 -0600
Subject: [PATCH 036/210] CH-100 Fix bad indentation in docker-compose.yaml
---
deployment-configuration/compose/templates/auto-compose.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 11996c91..b175f2b5 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -125,7 +125,7 @@ services:
{{- end }}
{{- end }}
{{- end }}
- labels:
+ labels:
- "traefik.enable=true"
{{- with $app_config.harness.service.port }}
- "traefik.http.services.{{ $app_name }}.loadbalancer.server.port={{ . }}"
From 8fdcba6e840340b4a347eb6ec00c61c0547ee645 Mon Sep 17 00:00:00 2001
From: aranega
Date: Tue, 20 Feb 2024 11:41:41 -0600
Subject: [PATCH 037/210] CH-100 Remove exposure of ports
---
.../compose/templates/auto-compose.yaml | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index b175f2b5..f25f558e 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -46,10 +46,10 @@ services:
image: {{ . }}
{{- end }}
{{- if eq $.Values.mainapp $app_name }}
- {{- with $app_config.harness.service.port }}
- ports:
- - "{{ . }}:{{ $app_config.harness.deployment.port }}"
- {{- end }}
+ # {{- with $app_config.harness.service.port }}
+ # ports:
+ # - "{{ . }}:{{ $app_config.harness.deployment.port }}"
+ # {{- end }}
{{- end }}
{{- with $app_config.harness.deployment.port }}
expose:
From 428a6191b0ec0fd5a09a59082b2d376785967328 Mon Sep 17 00:00:00 2001
From: aranega
Date: Wed, 21 Feb 2024 06:48:39 -0600
Subject: [PATCH 038/210] CH-100 Add post-process mechanism to generate files
---
.../ch_cli_tools/dockercompose.py | 26 +++++++++++++++++++
1 file changed, 26 insertions(+)
diff --git a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
index 83e04024..c088dc44 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
@@ -155,6 +155,32 @@ def generate_docker_compose_yaml(self):
subprocess.call(command, shell=True)
+ self.__post_process_multiple_document_docker_compose(dest_compose_yaml)
+
+ def __post_process_multiple_document_docker_compose(self, yaml_document):
+ if not yaml_document.exists():
+ logging.warning("Something went wrong during the docker-compose.yaml generation, cannot post-process it")
+ return
+
+ with open(yaml_document, "r") as f:
+ documents = yaml.safe_load_all(f)
+
+ for document in documents:
+ if "cloudharness-metadata" in document:
+ document_path = self.dest_deployment_path / document["cloudharness-metadata"]["path"]
+ logging.info("Post-process docker-compose.yaml, creating %s", document_path)
+ document_path.write_text(document["data"])
+ else:
+ with open(yaml_document, "w") as f:
+ yaml.dump(document, f)
+
+ # cloudharness-metadata:
+ # path: secrets.yaml
+
+ # data: |
+ # sdfmsldkf
+ # sdfmlskdfmslkdfs
+ # sdmlksdf
def __process_applications(self, helm_values, base_image_name):
for root_path in self.root_paths:
From 9d6c8b2f015c5269cb44dd27c9ae96038a39b46e Mon Sep 17 00:00:00 2001
From: aranega
Date: Wed, 21 Feb 2024 08:12:00 -0600
Subject: [PATCH 039/210] CH-100 Add new templates
---
.../deploy/templates-compose/deployments.yaml | 97 +++++++++++++++++++
.../compose/templates/allvalues-template.yaml | 20 ++++
2 files changed, 117 insertions(+)
create mode 100644 applications/events/deploy/templates-compose/deployments.yaml
create mode 100644 deployment-configuration/compose/templates/allvalues-template.yaml
diff --git a/applications/events/deploy/templates-compose/deployments.yaml b/applications/events/deploy/templates-compose/deployments.yaml
new file mode 100644
index 00000000..0001fde4
--- /dev/null
+++ b/applications/events/deploy/templates-compose/deployments.yaml
@@ -0,0 +1,97 @@
+{{- define "events.deployment" }}
+events:
+ networks:
+ - ch
+ image: solsson/kafka:2.3.0@sha256:b59603a8c0645f792fb54e9571500e975206352a021d6a116b110945ca6c3a1d
+ ports:
+ - "9094:9092"
+ expose:
+ - 5555
+ - 9094
+ - 9092
+ environment:
+ - CLASSPATH=/opt/kafka/libs/extensions/*
+ - KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:/etc/kafka/log4j.properties
+ - JMX_PORT=5555
+ command:
+ - "./bin/kafka-server-start.sh"
+ - "/etc/kafka/server.properties"
+ - "--override"
+ - "default.replication.factor=1"
+ - "--override"
+ - "min.insync.replicas=1"
+ - "--override"
+ - "offsets.topic.replication.factor=1"
+ - "--override"
+ - "offsets.topic.num.partitions=1"
+ depends_on:
+ events-kafka-init:
+ condition: service_completed_successfully
+
+events-kafka-init:
+ networks:
+ - ch
+ image: solsson/kafka-initutils@sha256:f6d9850c6c3ad5ecc35e717308fddb47daffbde18eb93e98e031128fe8b899ef
+ command:
+ - "/bin/bash"
+ - "/etc/kafka-configmap/init.sh"
+ environment:
+
+pzoo:
+ networks:
+ - ch
+ expose:
+ - 2181
+ - 2888
+ - 3888
+ image: solsson/kafka:2.3.0@sha256:b59603a8c0645f792fb54e9571500e975206352a021d6a116b110945ca6c3a1d
+ command:
+ - "./bin/zookeeper-server-start.sh"
+ - "/etc/kafka/zookeeper.properties"
+ environment:
+ - KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:/etc/kafka/log4j.properties
+ depends_on:
+ events-pzoo-init:
+ condition: service_completed_successfully
+
+events-pzoo-init:
+ networks:
+ - ch
+ expose:
+ image: solsson/kafka-initutils@sha256:f6d9850c6c3ad5ecc35e717308fddb47daffbde18eb93e98e031128fe8b899ef
+ command:
+ - "/bin/bash"
+ - "/etc/kafka-configmap/init.sh"
+ environment:
+ - PZOO_REPLICAS=1
+ - ZOO_REPLICAS=0
+
+zoo:
+ networks:
+ - ch
+ expose:
+ - 2181
+ - 2888
+ - 3888
+ image: solsson/kafka:2.3.0@sha256:b59603a8c0645f792fb54e9571500e975206352a021d6a116b110945ca6c3a1d
+ command:
+ - "./bin/zookeeper-server-start.sh"
+ - "/etc/kafka/zookeeper.properties"
+ environment:
+ - KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:/etc/kafka/log4j.properties
+ depends_on:
+ events-zoo-init:
+ condition: service_completed_successfully
+
+events-zoo-init:
+ networks:
+ - ch
+ image: solsson/kafka-initutils@sha256:f6d9850c6c3ad5ecc35e717308fddb47daffbde18eb93e98e031128fe8b899ef
+ command:
+ - "/bin/bash"
+ - "/etc/kafka-configmap/init.sh"
+ environment:
+ - PZOO_REPLICAS=1
+ - ZOO_REPLICAS=0
+ - ID_OFFSET=2
+{{- end }}
\ No newline at end of file
diff --git a/deployment-configuration/compose/templates/allvalues-template.yaml b/deployment-configuration/compose/templates/allvalues-template.yaml
new file mode 100644
index 00000000..d69538aa
--- /dev/null
+++ b/deployment-configuration/compose/templates/allvalues-template.yaml
@@ -0,0 +1,20 @@
+{{- /*
+To replace the secret values we create a dict with the structure:
+  apps:
+    <app-name>:
+      harness:
+        secrets:
+
+thus with an empty secrets node,
+and then mergeOverwrite the copy of the .Values we created,
+resulting in a copy of the .Values with all secrets being ""
+*/ -}}
+cloudharness-metadata:
+ path: allvalues2.yaml
+data: |
+{{- $values_copy := deepCopy .Values }}
+{{- range $key, $val := .Values.apps }}
+ {{- $new_secrets := dict "apps" (dict $key (dict "harness" (dict "secrets"))) }}
+ {{- $tmp := mergeOverwrite $values_copy $new_secrets }}
+{{- end }}
+{{ $values_copy | toYaml | indent 4 }}
From ae7cf1918e0d2d370eabdf2ceb5b7c1db3de43cf Mon Sep 17 00:00:00 2001
From: aranega
Date: Wed, 21 Feb 2024 10:33:20 -0600
Subject: [PATCH 040/210] CH-100 Add new templates (not modified yet)
---
.../compose/templates/auto-gatekeepers.yaml | 174 ++++++++++++++++++
.../compose/templates/auto-secrets.yaml | 50 +++++
.../ch_cli_tools/dockercompose.py | 19 +-
3 files changed, 233 insertions(+), 10 deletions(-)
create mode 100644 deployment-configuration/compose/templates/auto-gatekeepers.yaml
create mode 100644 deployment-configuration/compose/templates/auto-secrets.yaml
diff --git a/deployment-configuration/compose/templates/auto-gatekeepers.yaml b/deployment-configuration/compose/templates/auto-gatekeepers.yaml
new file mode 100644
index 00000000..898995cd
--- /dev/null
+++ b/deployment-configuration/compose/templates/auto-gatekeepers.yaml
@@ -0,0 +1,174 @@
+{{/* Secured Services/Deployments */}}
+{{- define "deploy_utils.securedservice" }}
+{{- $tls := not (not .root.Values.tls) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "{{ .app.harness.service.name }}-gk"
+ labels:
+ app: "{{ .app.harness.service.name }}-gk"
+data:
+ proxy.yml: |-
+ verbose: {{ .root.Values.debug }}
+ discovery-url: {{ ternary "https" "http" $tls}}://{{ .root.Values.apps.accounts.harness.subdomain }}.{{ .root.Values.domain }}/auth/realms/{{ .root.Values.namespace }}
+ client-id: {{ .root.Values.apps.accounts.webclient.id | quote }}
+ client-secret: {{ .root.Values.apps.accounts.webclient.secret }}
+ secure-cookie: {{ $tls }}
+ forbidden-page: /templates/access-denied.html.tmpl
+ enable-default-deny: {{ eq (.app.harness.secured | toString) "true" }}
+ listen: 0.0.0.0:8080
+ enable-refresh-tokens: true
+ server-write-timeout: {{ .app.harness.proxy.timeout.send | default .root.Values.proxy.timeout.send | default 180 }}s
+ upstream-timeout: {{ .app.harness.proxy.timeout.read | default .root.Values.proxy.timeout.read | default 180 }}s
+ upstream-response-header-timeout: {{ .app.harness.proxy.timeout.read | default .root.Values.proxy.timeout.read | default 180 }}s
+ upstream-expect-continue-timeout: {{ .app.harness.proxy.timeout.read | default .root.Values.proxy.timeout.read | default 180 }}s
+ server-read-timeout: {{ .app.harness.proxy.timeout.read | default .root.Values.proxy.timeout.read | default 180 }}s
+ upstream-keepalive-timeout: {{ .app.harness.proxy.timeout.keepalive | default .root.Values.proxy.timeout.keepalive | default 180 }}s
+ http-only-cookie: false
+ tls-cert:
+ tls-private-key:
+ redirection-url: {{ ternary "https" "http" $tls }}://{{ .app.harness.subdomain }}.{{ .root.Values.domain }}
+ encryption-key: AgXa7xRcoClDEU0ZDSH4X0XhL5Qy2Z2j
+ upstream-url: http://{{ .app.harness.service.name }}.{{ .app.namespace | default .root.Release.Namespace }}:{{ .app.harness.service.port | default 80}}
+ {{ if .app.harness.secured }}
+ {{ with .app.harness.uri_role_mapping }}
+ resources:
+ {{. | toYaml | nindent 4 }}
+ {{- end }}
+ {{- end }}
+ {{ if or .root.Values.local (not $tls) }}
+ skip-openid-provider-tls-verify: true
+ skip-upstream-tls-verify: true
+ {{- end }}
+ cacert.crt: {{ .files.Get "resources/certs/cacert.crt" | quote }}
+  access-denied.html.tmpl: |-
+    <html>
+      <head>
+        <title>403 - Access Forbidden</title>
+      </head>
+      <body>
+        <h1>Oops!</h1>
+        <h2>403 Permission Denied</h2>
+        <p>
+          Sorry, you do not have access to this page, please contact your administrator.
+          If you have been assigned new authorizations, try to refresh the page or to login again.
+        </p>
+      </body>
+    </html>
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: "{{ .app.harness.service.name }}-gk"
+ labels:
+ app: "{{ .app.harness.service.name }}-gk"
+spec:
+ ports:
+ - name: http
+ port: 8080
+ selector:
+ app: "{{ .app.harness.service.name }}-gk"
+ type: ClusterIP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: "{{ .app.harness.service.name }}-gk"
+ labels:
+ app: "{{ .app.harness.service.name }}-gk"
+
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: "{{ .app.harness.service.name }}-gk"
+ template:
+ metadata:
+ annotations:
+ checksum/config: {{ .app.harness.uri_role_mapping | toString | sha256sum }}
+ labels:
+ app: "{{ .app.harness.service.name }}-gk"
+ spec:
+{{ include "deploy_utils.etcHosts" .root | indent 6 }}
+ containers:
+ - name: {{ .app.harness.service.name | quote }}
+ image: "quay.io/gogatekeeper/gatekeeper:1.3.8"
+ imagePullPolicy: IfNotPresent
+ {{ if .root.Values.local }}
+ securityContext:
+ allowPrivilegeEscalation: false
+ runAsUser: 0
+ {{- end }}
+ env:
+ - name: PROXY_CONFIG_FILE
+ value: /opt/proxy.yml
+ volumeMounts:
+ - name: "{{ .app.harness.service.name }}-gk-proxy-config"
+ mountPath: /opt/proxy.yml
+ subPath: proxy.yml
+ - name: "{{ .app.harness.service.name }}-gk-proxy-config"
+ mountPath: /etc/pki/ca-trust/source/anchors/cacert.crt
+ subPath: cacert.crt
+ - name: "{{ .app.harness.service.name }}-gk-proxy-config"
+ mountPath: /templates/access-denied.html.tmpl
+ subPath: access-denied.html.tmpl
+ ports:
+ - name: http
+ containerPort: 8080
+ - name: https
+ containerPort: 8443
+ resources:
+ requests:
+ memory: "32Mi"
+ cpu: "50m"
+ limits:
+ memory: "64Mi"
+ cpu: "100m"
+ volumes:
+ - name: "{{ .app.harness.service.name }}-gk-proxy-config"
+ configMap:
+ name: "{{ .app.harness.service.name }}-gk"
+---
+{{- end }}
+{{- if .Values.secured_gatekeepers }}
+{{ $files := .Files }}
+{{- range $app := .Values.apps }}
+ {{- if and (hasKey $app "port") ($app.harness.secured) }}
+---
+ {{ include "deploy_utils.securedservice" (dict "root" $ "app" $app "files" $files) }}
+ {{- end }}
+ {{- range $subapp := $app }}
+ {{- if contains "map" (typeOf $subapp) }}
+ {{- if and (hasKey $subapp "harness.port") (hasKey $subapp "harness.secured") }}
+ {{- if $subapp.harness.secured }}
+---
+ {{ include "deploy_utils.securedservice" (dict "root" $ "app" $subapp "files" $files) }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/deployment-configuration/compose/templates/auto-secrets.yaml b/deployment-configuration/compose/templates/auto-secrets.yaml
new file mode 100644
index 00000000..a0a37a2f
--- /dev/null
+++ b/deployment-configuration/compose/templates/auto-secrets.yaml
@@ -0,0 +1,50 @@
+{{- define "deploy_utils.secret" }}
+{{- if .app.harness.secrets }}
+{{- $secret_name := printf "%s" .app.harness.deployment.name }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ $secret_name }}
+ namespace: {{ .root.Values.namespace }}
+ labels:
+ app: {{ .app.harness.deployment.name }}
+type: Opaque
+ {{- $secret := (lookup "v1" "Secret" .root.Values.namespace $secret_name) }}
+ {{- if $secret }}
+# secret already exists
+ {{- if not (compact (values .app.harness.secrets)) }}
+# secret values are null, copy from the existing secret
+data:
+ {{- range $k, $v := $secret.data }}
+ {{ $k }}: {{ $v }}
+ {{- end }}
+ {{- else }}
+# there are non default values in values.yaml, use these
+stringData:
+ {{- range $k, $v := .app.harness.secrets }}
+ {{ $k }}: {{ $v | default (randAlphaNum 20) }}
+ {{- end }}
+ {{- end }}
+ {{- else }}
+# secret doesn't exist
+stringData:
+ {{- range $k, $v := .app.harness.secrets }}
+ {{ $k }}: {{ $v | default (randAlphaNum 20) }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+---
+{{- end }}
+---
+{{- range $app := .Values.apps }}
+---
+ {{- include "deploy_utils.secret" (dict "root" $ "app" $app) }}
+ {{- range $subapp := $app }}
+ {{- if contains "map" (typeOf $subapp) }}
+ {{- if hasKey $subapp "harness" }}
+---
+ {{- include "deploy_utils.secret" (dict "root" $ "app" $subapp) }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
index c088dc44..d5d51a6f 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
@@ -166,21 +166,20 @@ def __post_process_multiple_document_docker_compose(self, yaml_document):
documents = yaml.safe_load_all(f)
for document in documents:
+ if not document:
+ continue
if "cloudharness-metadata" in document:
document_path = self.dest_deployment_path / document["cloudharness-metadata"]["path"]
logging.info("Post-process docker-compose.yaml, creating %s", document_path)
- document_path.write_text(document["data"])
+ data = document["data"]
+ # if document_path.suffix == ".yaml":
+ # with open(document_path, "w") as f:
+ # yaml.dump(yaml.safe_load(data), f, default_flow_style=True)
+ # else:
+ document_path.write_text(data)
else:
with open(yaml_document, "w") as f:
- yaml.dump(document, f)
-
- # cloudharness-metadata:
- # path: secrets.yaml
-
- # data: |
- # sdfmsldkf
- # sdfmlskdfmslkdfs
- # sdmlksdf
+ yaml.dump(document, f, default_flow_style=False)
def __process_applications(self, helm_values, base_image_name):
for root_path in self.root_paths:
From 7acc7f5eade36af4f859f8b6686444b0d9ed65d6 Mon Sep 17 00:00:00 2001
From: aranega
Date: Thu, 22 Feb 2024 08:21:00 -0600
Subject: [PATCH 041/210] CH-100 Add generation of resources files
---
.../compose/templates/auto-compose.yaml | 2 +-
.../compose/templates/auto-resources.yaml | 18 ++++++++++++++++++
.../compose/templates/auto-secrets.yaml | 3 ++-
.../ch_cli_tools/dockercompose.py | 12 ++++++++++--
4 files changed, 31 insertions(+), 4 deletions(-)
create mode 100644 deployment-configuration/compose/templates/auto-resources.yaml
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index f25f558e..9a0f4fd9 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -108,7 +108,7 @@ services:
{{- with $app_config.harness.resources }}
{{- range .}}
- type: bind
- source: compose/resources/{{ $app_name }}/{{ .src }}
+ source: compose/resources/generated/{{ $app_name }}/{{ .src }}
target: {{ .dst }}
{{- end }}
{{- end}}
diff --git a/deployment-configuration/compose/templates/auto-resources.yaml b/deployment-configuration/compose/templates/auto-resources.yaml
new file mode 100644
index 00000000..8d1e1e73
--- /dev/null
+++ b/deployment-configuration/compose/templates/auto-resources.yaml
@@ -0,0 +1,18 @@
+{{- define "deploy_utils.resource"}}
+{{ $service_name := .app.harness.deployment.name }}
+---
+# {{ $service_name }}-{{ .resource.name }}
+cloudharness-metadata:
+ path: resources/generated/{{ $service_name }}/{{ base .resource.src }}
+data: |
+{{ tpl (.root.Files.Get (print "resources/" $service_name "/" .resource.src)) .root | trim | indent 2 }}
+{{- end}}
+
+{{- range $app := .Values.apps }}
+ {{- if and (hasKey $app "port") $app.harness.deployment.auto | default false }}
+---
+ {{- range $resource := $app.harness.resources }}
+ {{- include "deploy_utils.resource" (dict "app" $app "resource" $resource "root" $) }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
\ No newline at end of file
diff --git a/deployment-configuration/compose/templates/auto-secrets.yaml b/deployment-configuration/compose/templates/auto-secrets.yaml
index a0a37a2f..9635d33a 100644
--- a/deployment-configuration/compose/templates/auto-secrets.yaml
+++ b/deployment-configuration/compose/templates/auto-secrets.yaml
@@ -1,5 +1,6 @@
{{- define "deploy_utils.secret" }}
-{{- if .app.harness.secrets }}
+{{- if and .app.harness.secrets false }} {{/* TODO */}}
+
{{- $secret_name := printf "%s" .app.harness.deployment.name }}
apiVersion: v1
kind: Secret
diff --git a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
index d5d51a6f..c28eb2b7 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
@@ -165,21 +165,29 @@ def __post_process_multiple_document_docker_compose(self, yaml_document):
with open(yaml_document, "r") as f:
documents = yaml.safe_load_all(f)
+ main_document = None
for document in documents:
if not document:
continue
if "cloudharness-metadata" in document:
document_path = self.dest_deployment_path / document["cloudharness-metadata"]["path"]
logging.info("Post-process docker-compose.yaml, creating %s", document_path)
+ document_path.parent.mkdir(parents=True, exist_ok=True)
data = document["data"]
# if document_path.suffix == ".yaml":
# with open(document_path, "w") as f:
# yaml.dump(yaml.safe_load(data), f, default_flow_style=True)
# else:
+
document_path.write_text(data)
else:
- with open(yaml_document, "w") as f:
- yaml.dump(document, f, default_flow_style=False)
+ # We need to save the main document later
+ # "safe_load_all" returns a generator over the file,
+ # so if we modify it while looping on "documents"
+ # the output will be affected (probably truncated for some outputs)
+ main_document = document # we need to save the main document later,
+ with open(yaml_document, "w") as f:
+ yaml.dump(main_document, f, default_flow_style=False)
def __process_applications(self, helm_values, base_image_name):
for root_path in self.root_paths:
From 571c2ab494bd2207f500ae78a5738f77ec49b719 Mon Sep 17 00:00:00 2001
From: aranega
Date: Thu, 22 Feb 2024 09:12:20 -0600
Subject: [PATCH 042/210] CH-100 Add dependency between service and db
---
deployment-configuration/compose/templates/auto-compose.yaml | 2 ++
1 file changed, 2 insertions(+)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 9a0f4fd9..abfdb2c6 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -135,6 +135,8 @@ services:
- "traefik.http.routers.{{ $app_name }}.rule=Host(`{{ $app_config.harness.subdomain }}.{{ $.Values.domain }}`)"
- "traefik.http.routers.{{ $app_name }}.entrypoints=web"
{{- with $app_config.harness.database }}
+ depends_on:
+ - {{ .name }}
{{- if not .auto }}
{{- continue}}
{{- end }}
From 9c8c19ee7b7b79080e1ceb5c87979c363855f5ba Mon Sep 17 00:00:00 2001
From: aranega
Date: Fri, 23 Feb 2024 06:00:47 -0600
Subject: [PATCH 043/210] CH-100 Add secret handling
---
.../compose/templates/auto-compose.yaml | 3 ++
.../compose/templates/auto-secrets.yaml | 32 +++++++++----------
deployment/sc.yaml | 7 ----
3 files changed, 18 insertions(+), 24 deletions(-)
delete mode 100644 deployment/sc.yaml
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index abfdb2c6..120b8fa9 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -99,6 +99,9 @@ services:
{{- end }}
volumes:
- ./compose/allvalues.yaml:/opt/cloudharness/resources/allvalues.yaml:ro
+ {{- range $file_name, $_ := $app_config.harness.secrets }}
+ - ./compose/resources/generated/auth/{{ $file_name }}:/opt/cloudharness/resources/auth/{{ $file_name }}
+ {{- end }}
{{- if or $deployment.volume $app_config.harness.resources }}
{{- with $deployment.volume }}
- type: volume
diff --git a/deployment-configuration/compose/templates/auto-secrets.yaml b/deployment-configuration/compose/templates/auto-secrets.yaml
index 9635d33a..ed9345d2 100644
--- a/deployment-configuration/compose/templates/auto-secrets.yaml
+++ b/deployment-configuration/compose/templates/auto-secrets.yaml
@@ -1,49 +1,47 @@
{{- define "deploy_utils.secret" }}
-{{- if and .app.harness.secrets false }} {{/* TODO */}}
+{{- if .app.harness.secrets }}
{{- $secret_name := printf "%s" .app.harness.deployment.name }}
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ $secret_name }}
- namespace: {{ .root.Values.namespace }}
- labels:
- app: {{ .app.harness.deployment.name }}
-type: Opaque
{{- $secret := (lookup "v1" "Secret" .root.Values.namespace $secret_name) }}
{{- if $secret }}
# secret already exists
{{- if not (compact (values .app.harness.secrets)) }}
# secret values are null, copy from the existing secret
-data:
{{- range $k, $v := $secret.data }}
- {{ $k }}: {{ $v }}
+cloudharness-metadata:
+ path: resources/generated/auth/{{ $k }}
+
+data: {{ $v }}
+---
{{- end }}
{{- else }}
# there are non default values in values.yaml, use these
stringData:
{{- range $k, $v := .app.harness.secrets }}
- {{ $k }}: {{ $v | default (randAlphaNum 20) }}
+cloudharness-metadata:
+ path: resources/generated/auth/{{ $k }}
+
+data: {{ $v | default (randAlphaNum 20) }}
+---
{{- end }}
{{- end }}
{{- else }}
# secret doesn't exist
stringData:
{{- range $k, $v := .app.harness.secrets }}
- {{ $k }}: {{ $v | default (randAlphaNum 20) }}
+cloudharness-metadata:
+ path: resources/generated/auth/{{ $k }}
+data: {{ $v | default (randAlphaNum 20) }}
+---
{{- end }}
{{- end }}
{{- end }}
----
{{- end }}
----
{{- range $app := .Values.apps }}
----
{{- include "deploy_utils.secret" (dict "root" $ "app" $app) }}
{{- range $subapp := $app }}
{{- if contains "map" (typeOf $subapp) }}
{{- if hasKey $subapp "harness" }}
----
{{- include "deploy_utils.secret" (dict "root" $ "app" $subapp) }}
{{- end }}
{{- end }}
diff --git a/deployment/sc.yaml b/deployment/sc.yaml
deleted file mode 100644
index 69c99d99..00000000
--- a/deployment/sc.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
- name: standard
-provisioner: docker.io/hostpath
-reclaimPolicy: Delete
-volumeBindingMode: Immediate
\ No newline at end of file
From f6d0c6763b2378ff5cfe331d019dfa735bf61f2a Mon Sep 17 00:00:00 2001
From: aranega
Date: Fri, 23 Feb 2024 06:25:39 -0600
Subject: [PATCH 044/210] CH-100 Remove argo from dependencies
---
.../compose/templates/auto-compose.yaml | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 120b8fa9..b28fbc88 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -91,7 +91,10 @@ services:
- {{ . }}:{{ $service_name }}.{{ $.Values.domain }}
{{- end }}
{{- end }}
- {{- with $app_config.harness.dependencies.hard }}
+ {{/* Takes the hard deps, removes argo and adds the db if there is one */}}
+ {{/* To be sure to add the db properly, we "dig" the "harness" config for "database.name" and return "" if one of the keys doesn't exist */}}
+ {{/* "compact" in the beginning is to remove empty values */}}
+ {{- with compact (append (without $app_config.harness.dependencies.hard "argo") (dig "database" "name" "" $app_config.harness) ) }}
depends_on:
{{- range . }}
- {{ . }}
@@ -138,8 +141,6 @@ services:
- "traefik.http.routers.{{ $app_name }}.rule=Host(`{{ $app_config.harness.subdomain }}.{{ $.Values.domain }}`)"
- "traefik.http.routers.{{ $app_name }}.entrypoints=web"
{{- with $app_config.harness.database }}
- depends_on:
- - {{ .name }}
{{- if not .auto }}
{{- continue}}
{{- end }}
From 4fd6deeb480add6f99fbc30223d1af8353a9288a Mon Sep 17 00:00:00 2001
From: aranega
Date: Fri, 23 Feb 2024 06:41:41 -0600
Subject: [PATCH 045/210] CH-100 Change from pyyaml to ruamel
The dependency is already pulled in by a third-party lib
---
.../ch_cli_tools/dockercompose.py | 45 +++++++++----------
1 file changed, 20 insertions(+), 25 deletions(-)
diff --git a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
index c28eb2b7..bafe5a00 100644
--- a/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
+++ b/tools/deployment-cli-tools/ch_cli_tools/dockercompose.py
@@ -2,6 +2,7 @@
Utilities to create a helm chart from a CloudHarness directory structure
"""
import yaml
+from ruamel.yaml import YAML
import os
import shutil
import logging
@@ -162,32 +163,26 @@ def __post_process_multiple_document_docker_compose(self, yaml_document):
logging.warning("Something went wrong during the docker-compose.yaml generation, cannot post-process it")
return
- with open(yaml_document, "r") as f:
- documents = yaml.safe_load_all(f)
+ yaml_handler = YAML()
+ documents = yaml_handler.load_all(yaml_document)
- main_document = None
- for document in documents:
- if not document:
- continue
- if "cloudharness-metadata" in document:
- document_path = self.dest_deployment_path / document["cloudharness-metadata"]["path"]
- logging.info("Post-process docker-compose.yaml, creating %s", document_path)
- document_path.parent.mkdir(parents=True, exist_ok=True)
- data = document["data"]
- # if document_path.suffix == ".yaml":
- # with open(document_path, "w") as f:
- # yaml.dump(yaml.safe_load(data), f, default_flow_style=True)
- # else:
-
- document_path.write_text(data)
- else:
- # We need to save the main document later
- # "safe_load_all" returns a generator over the file,
- # so if we modify it while looping on "documents"
- # the output will be affected (probably truncated for some outputs)
- main_document = document # we need to save the main document later,
- with open(yaml_document, "w") as f:
- yaml.dump(main_document, f, default_flow_style=False)
+ main_document = None
+ for document in documents:
+ if not document:
+ continue
+ if "cloudharness-metadata" in document:
+ document_path = self.dest_deployment_path / document["cloudharness-metadata"]["path"]
+ logging.info("Post-process docker-compose.yaml, creating %s", document_path)
+ document_path.parent.mkdir(parents=True, exist_ok=True)
+ data = document["data"]
+ document_path.write_text(data)
+ else:
+ # We need to save the main document later
+ # "load_all" returns a generator over the file,
+ # so if we modify it while looping on "documents"
+ # the output will be affected (probably truncated for some outputs)
+ main_document = document # we need to save the main document later
+ yaml_handler.dump(main_document, yaml_document)
def __process_applications(self, helm_values, base_image_name):
for root_path in self.root_paths:
From 86c0e4f51c4c4a56a4b59e148f1c1454675286ec Mon Sep 17 00:00:00 2001
From: aranega
Date: Fri, 23 Feb 2024 06:42:24 -0600
Subject: [PATCH 046/210] CH-100 Remove tmp "events" from deps
---
deployment-configuration/compose/templates/auto-compose.yaml | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index b28fbc88..8fc62e83 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -88,7 +88,11 @@ services:
links:
{{- range . }}
{{- $service_name := (get $.Values.apps .).harness.service.name }}
+ {{- if eq . "events"}}
+# - {{ . }}:{{ $service_name }}.{{ $.Values.domain }}
+ {{- else }}
- {{ . }}:{{ $service_name }}.{{ $.Values.domain }}
+ {{- end }}
{{- end }}
{{- end }}
{{/* Takes the hard deps, removes argo and adds the db if there is one */}}
From a5909cea41d1af911c025c29711adb73fb299ed4 Mon Sep 17 00:00:00 2001
From: aranega
Date: Fri, 23 Feb 2024 06:46:25 -0600
Subject: [PATCH 047/210] CH-100 Add back sc.yaml
---
deployment/sc.yaml | 7 +++++++
1 file changed, 7 insertions(+)
create mode 100644 deployment/sc.yaml
diff --git a/deployment/sc.yaml b/deployment/sc.yaml
new file mode 100644
index 00000000..69c99d99
--- /dev/null
+++ b/deployment/sc.yaml
@@ -0,0 +1,7 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: standard
+provisioner: docker.io/hostpath
+reclaimPolicy: Delete
+volumeBindingMode: Immediate
\ No newline at end of file
From 4379252dd0bea6e6b4ad4d66f671e075fca598f9 Mon Sep 17 00:00:00 2001
From: aranega
Date: Fri, 23 Feb 2024 07:46:38 -0600
Subject: [PATCH 048/210] CH-100 Add first handling of NFS volume
---
.../nfsserver-deployment.yaml | 18 ++++++++++++------
.../compose/templates/auto-compose.yaml | 7 +++++++
2 files changed, 19 insertions(+), 6 deletions(-)
diff --git a/applications/nfsserver/deploy/templates-compose/nfsserver-deployment.yaml b/applications/nfsserver/deploy/templates-compose/nfsserver-deployment.yaml
index 7e9b6819..50dc08ed 100644
--- a/applications/nfsserver/deploy/templates-compose/nfsserver-deployment.yaml
+++ b/applications/nfsserver/deploy/templates-compose/nfsserver-deployment.yaml
@@ -1,13 +1,19 @@
{{- define "nfsserver.deployment" }}
-{{- $nfs := .apps.nfsserver}}
+{{- with .apps.nfsserver}}
-{{ $nfs.name }}:
- image: {{ $nfs.harness.deployment.image }}
+{{ .name }}:
+ image: {{ .harness.deployment.image }}
environment:
- # NFS useDNS? {{ $nfs.nfs.useDNS }}
- {{- if $nfs.nfs.useDNS }}
+ # NFS useDNS? {{ .nfs.useDNS }}
+ {{- if .nfs.useDNS }}
- NFS_SERVER={{ printf "nfs-server.%s.svc.cluster.local" .namespace }}
{{- end }}
- - NFS_PATH={{ $nfs.nfs.path }}
+ - NFS_PATH={{ .nfs.path }}
- PROVISIONER_NAME={{ printf "%s-nfs-provisioner" .namespace }}
+
+ volumes:
+ - type: volume
+ source: {{ .nfs.volumeName }}
+ target: {{ .nfs.path }}
+{{- end }}
{{- end }}
\ No newline at end of file
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index 8fc62e83..a8ac40cf 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -168,4 +168,11 @@ volumes: # this inclusion needs to be conditional
dshm:
{{- end }}
{{- end }}
+ {{- if eq $app_name "nfsserver" }}
+ {{ $app_config.nfs.volumeName }}:
+ # driver_opts:
+ # type: "nfs"
+ # o: "{{ join "," $app_config.nfs.mountOptions }}"
+ # device: ":{{ $app_config.nfs.path }}"
+ {{- end }}
{{- end }}
From 46ee1408a87baf8f4f387a45ba4173af77b6a0e4 Mon Sep 17 00:00:00 2001
From: aranega
Date: Mon, 26 Feb 2024 11:36:48 -0600
Subject: [PATCH 049/210] CH-100 Add volumes conditionally
---
.../compose/templates/auto-compose.yaml | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/deployment-configuration/compose/templates/auto-compose.yaml b/deployment-configuration/compose/templates/auto-compose.yaml
index a8ac40cf..c2aad014 100644
--- a/deployment-configuration/compose/templates/auto-compose.yaml
+++ b/deployment-configuration/compose/templates/auto-compose.yaml
@@ -157,7 +157,20 @@ networks:
ch:
name: ch_network
-volumes: # this inclusion needs to be conditional
+{{- range $app_name, $app_config := .Values.apps }}
+ {{- with $app_config.harness.deployment.volume }}
+volumes:
+ {{- break }}
+ {{- end }}
+ {{- with $app_config.harness.database }}
+volumes:
+ {{- break }}
+ {{- end }}
+ {{- if eq $app_name "nfsserver" }}
+volumes:
+ {{- break }}
+ {{- end }}
+{{- end }}
{{- range $app_name, $app_config := .Values.apps }}
{{- with $app_config.harness.deployment.volume }}
{{ .name }}:
From e135c6a6dbef827854d742e188f4866703da2b00 Mon Sep 17 00:00:00 2001
From: aranega
Date: Mon, 26 Feb 2024 12:27:56 -0600
Subject: [PATCH 050/210] CH-100 Add first tutorial about docker compose
---
...clock-application-with-docker-compose.adoc | 328 ++++++++++++++++++
1 file changed, 328 insertions(+)
create mode 100644 docs/tutorials/simple-date-clock-application-with-docker-compose.adoc
diff --git a/docs/tutorials/simple-date-clock-application-with-docker-compose.adoc b/docs/tutorials/simple-date-clock-application-with-docker-compose.adoc
new file mode 100644
index 00000000..70e8f1e2
--- /dev/null
+++ b/docs/tutorials/simple-date-clock-application-with-docker-compose.adoc
@@ -0,0 +1,328 @@
+:repo_url: ../../../../../
+:ch: CloudHarness
+:dc: Docker Compose
+:dc-: docker compose
+:repo_fragment: MetaCell/cloud-harness
+
+
+= A simple date-clock application: tutorial {ch} targeting {dc}
+
+In this small tutorial, we will look at different aspects of developing applications with {ch} by building, from scratch, a small webapp that fetches information from a server on a regular basis.
+This tutorial will show you how to generate the `{dc-}` configuration and how to build and deploy this simple application.
+
+{ch} generates the initial files and folders for your project from templates tackling different aspects of your app, depending on your requirements; __e.g.__, for a webapp project, it generates the initial ReactJS frontend files and the initial Flask backend files.
+For the API part, {ch} relies on OpenAPI 3 to deal with the endpoints/model description.
+
+The different aspects that will be covered here are:
+
+* how to bootstrap a new app, build it, and deploy it on {dc};
+* how to modify/update the app, build it, and run it again.
+
+== The tools you need to deploy/build your application
+
+The following tools, besides Python, are not strictly required to work with {ch} in general, but this tutorial relies on them.
+Before starting, please be sure you have the following tools installed on your machine:
+
+* `python`
+* `yarn`
+* `{ch}` -- if not installed, please check other documentation and tutorials
+* `helm` -- to deal with the generation of the {dc}
+* `skaffold` -- to build the different images that will run on {dc}
+* `{dc-}` -- to actually run the built application
+
+
+== Creating a very simple webapp
+
+Now that we know how to configure/run/deploy apps on our local cluster, we will create a very simple webapp.
+First, we will only generate the project's artifacts using `harness-application`, then build/run/deploy them.
+After that, we will modify the API to add a new endpoint and adapt the frontend accordingly.
+
+=== Creating a new webapp and building the frontend
+
+The webapp that we will create is deliberately simple: it fetches the current date and time when a button is pressed.
+Nothing fancy, just a way to see how to interact with the generated sources and get everything running on your local cluster.
+
+The first step is to generate the project's files.
+In our case, we want to develop a webapp, meaning that we want a frontend and a backend.
+We use `harness-application` to generate the first files with two specific templates: `webapp` and `flask-server`.
+We first place ourselves in the parent directory of where you cloned the `cloud-harness` repository.
+
+[NOTE]
+We could place ourselves anywhere; we would just have to remember the path to the `cloud-harness` repository.
+
+.Generating the first project's file
+[source,bash]
+----
+harness-application clockdate -t webapp -t flask-server
+----
+
+The name of the application is `clockdate` and we use the `webapp` and `flask-server` templates.
+There are various templates with different purposes: DB interaction, backend, frontend, ...
+
+We can now observe that a new directory named `clockdate` has been created in the `applications` folder.
+The folder is organized into several sub-folders, each playing a different role in the app.
+
+.Ensuring that the backend is considered as a webapp
+We will now make a small modification, or rather ensure that the backend code activates the "webapp" behavior.
+Open the generated file `clockdate/backend/clockdate/__main__.py` and check that the following line has the keyword parameter `webapp` set to `True`.
+
+[source,python]
+----
+app = init_flask(title="clockdate", init_app_fn=None, webapp=True)
+----
+
+This option ensures the registration of some specific endpoints by {ch}.
+In this case, it ensures that the `/` endpoint will be mapped to the `index.html` produced for the frontend.
+
+.Building the frontend
+
+In this tutorial, before letting {ch} generate the configuration files for {dc}, we will build the frontend using `yarn`.
+Enter the `clockdate/frontend` folder and just type
+
+[source, bash]
+----
+yarn install
+----
+
+This will generate a `yarn.lock` file, which is required later for the build of the Docker images.
+
+[NOTE]
+This step could have been done later, but it has to be done *before* building the different Docker images with `skaffold`.
+
+
+.Generating the `{dc-}` configuration files for our `clockdate` app
+[source,bash]
+----
+# run in the directory that contains the cloud-harness repository
+harness-deployment cloud-harness . -u -dtls -l -d azathoth.local -e local -n azathoth -i clockdate --docker-compose
+----
+
+The key here is to add the `--docker-compose` option that will trigger the generation of a set of files in the `deployment` folder,
+as well as a slightly modified version of the `skaffold.yaml` file.
+
+As a result, in the `deployment` folder, we should have something that looks like this:
+
+[source]
+----
++- CURRENT_DIRECTORY
+ [...]
+ + deployment/
+ + compose/ -> the template files and some generated files dedicated to docker compose
+ `- docker-compose.yaml -> the main file used by {dc} to deploy everything
+ `- skaffold.yaml -> used by skaffold to build the Docker images
+----
+
+Now you can build/deploy/run it using `skaffold`.
+
+[source,bash]
+----
+skaffold build
+----
+
+.Deploying your app on {dc}
+
+To deploy the application on {dc}, you only need to position yourself in the directory where the `docker-compose.yaml` file was generated, that is, the `deployment` folder.
+
+[source,bash]
+----
+cd deployment
+docker compose up
+----
+
+This command downloads the necessary images and reuses the ones built by `skaffold` to deploy everything.
+
+Now, to be sure you can properly access the app, a small addition to your `/etc/hosts` file is required:
+
+[source]
+----
+127.0.0.1 clockdate.azathoth.local
+----
+
+Now you can open your browser at `http://clockdate.azathoth.local` and see that everything is running properly.
+You can also go to `http://clockdate.azathoth.local/api/ping` and check that you get a message.
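+
+For instance, from a terminal (the exact response payload may differ):
+
+[source,bash]
+----
+curl http://clockdate.azathoth.local/api/ping
+----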
+
+
+=== Modifying your webapp, adding behavior
+
+We are now able to generate and run applications, but we have not yet added our own behavior.
+To do so, we need to modify the generated sources.
+If we take a closer look at the folder generated by `harness-application`, we see three folders that are the ones we will modify in normal usage:
+
+.Generated directory organization
+[source]
+----
++- api -> owns the OpenAPI definition of the endpoints/resources handled by the API
++- backend
+ `- clockdate -> the project backend files
+ |- controllers -> the controller definition
+ `- models -> the resources exposed by the API
++- frontend -> the webpage files
+----
+
+First, we will modify the backend to add a new endpoint that answers with the current date and time as a string.
+The process is the following:
+
+. we add the new endpoint in the `api` folder, modifying the `openapi.yaml` file,
+. we regenerate the code of the application using `harness-generate`
+. we code the behavior of the endpoint in the dedicated method generated in the `backend/clockdate/controllers` folder.
+. we build/deploy/run the code to see it running (for a quicker dev loop, this step can be replaced by a pure Python run of the backend; see the sketch below).
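+
+A minimal sketch of such a local run, assuming the backend's Python dependencies are installed (the install command below is a hypothetical example):
+
+[source,bash]
+----
+cd applications/clockdate/backend
+pip install -e .     # hypothetical: install the backend package and its dependencies
+python -m clockdate  # run the Flask backend directly, via clockdate/__main__.py
+----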
+
+==== Adding the new endpoint to the openapi specification
+
+We will add a new endpoint named `currentdate` that answers with a string on a `GET` request.
+To do so, we add a new path in the `paths` section.
+
+.Modifying the `api/openapi.yaml` file
+[source,yaml]
+----
+paths:
+ /currentdate:
+ get:
+ operationId: currentdate
+ responses:
+ "200":
+ content:
+ application/json:
+ schema:
+ type: string
+ description: Current date and time
+ "500":
+ description: System cannot give the current time
+ summary: Gets the current date and time
+ tags: [datetime]
+----
+
+[NOTE]
+The name of the controller in which the function related to the endpoint will be generated depends on the `tags` value defined in the `api/openapi.yaml` file.
+
+We validate that our OpenAPI specification is correct.
+
+[source]
+----
+$ openapi-spec-validator applications/clockdate/api/openapi.yaml
+OK
+----
+
+Now we regenerate the application code by running `harness-application` another time.
+
+.Regenerating the code of our modified app
+[source,bash]
+----
+harness-application clockdate -t flask-server -t webapp
+----
+
+This will add a new `datetime_controller.py` in the `backend/clockdate/controllers` package.
+
+[IMPORTANT]
+Note that all the controller files (and indeed all the files) in the `backend` directory are overridden.
+To prevent files from being overridden, you need to edit the `.openapi-generator-ignore` file, which acts (in a way) like a `.gitignore` file, marking the files/directories that need to be ignored by the generation.
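+
+A minimal sketch of such an ignore file (the entry below is a hypothetical example; adjust it to the files you edit):
+
+[source]
+----
+# .openapi-generator-ignore
+# hypothetical entry: keep our handwritten controller from being regenerated
+clockdate/controllers/datetime_controller.py
+----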
+
+When we open this controller file, we find the following generated method:
+
+[source,python]
+----
+def currentdate(): # noqa: E501
+ """Gets the current date and time
+
+ # noqa: E501
+
+
+ :rtype: str
+ """
+ return 'do some magic!'
+----
+
+This is the moment to add the behavior we want:
+
+[source,python]
+----
+def currentdate(): # noqa: E501
+ """Gets the current date and time
+
+ # noqa: E501
+
+
+ :rtype: str
+ """
+ from datetime import datetime
+ return f'{datetime.now()}'
+----
+
+We simply import the `datetime` class and ask for the current date and time.
+The string interpolation here is used only to force the result to be formatted as a string; it is not mandatory.
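+
+For instance, the following is equivalent:
+
+[source,python]
+----
+# equivalent, without f-string interpolation
+return str(datetime.now())
+----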
+
+Now that our new endpoint is coded, we can build and deploy it using `skaffold build` and then `{dc-} up`.
+Once the deployment is done, we can navigate to http://clockdate.azathoth.local/api/currentdate to see the result.
+
+
+=== A quick and dirty frontend to test our endpoint
+
+
+Now that we have the "backend" running, we will modify the frontend to add a label and a button that fetch the date and time from the new endpoint we defined.
+If we look at the generated frontend source code, we see a `src/rest/api.ts` file.
+The generated code targets ReactJS as its framework.
+This module provides clients for the API generated from the `api/openapi.yaml` specification.
+More precisely, it provides one client per `tag` defined in the OpenAPI specification.
+In our case, we defined a tag `datetime`, so we find in `api.ts` a class `DatetimeApi`.
+This is the class we will instantiate and use to call the endpoint we defined in the previous section.
+
+First, we are going to code a new React component providing a header with the current date and time and a button that asks the server for a fresh date and time.
+
+We call this component `DateTime` and place it in a `DateTime.tsx` file in the `src/components` directory.
+
+.Code of the `frontend/src/component/DateTime.tsx` component
+[source,javascript]
+----
+import React, { useState, useEffect, useCallback } from 'react';
+import { DatetimeApi } from '../rest/api'
+
+const api = new DatetimeApi() <1>
+
+const DateTime = () => {
+ const [datetime, setDatetime] = useState('unavailable');
+ useEffect(() => updateDate(), []);
+
+ const updateDate = useCallback(() => {
+ api.currentdate().then(r => setDatetime(r.data)); <2>
+ }, []);
+
+  return (
+    <div>
+      <h1>{datetime}</h1>
+      <button onClick={updateDate}>Fetch the current date and time</button>
+    </div>
+  )
+}
+
+export default DateTime;
+----
+
+<1> The `DatetimeApi` class is instantiated; this is the instance we will use every time we need to perform a request against an API endpoint.
+<2> is where the call is actually performed. The `currentdate` method is generated by {ch}.
+
+Now that we have our dedicated component, we will integrate it into the current page.
+To do that, we need to modify the `App.tsx` component.
+This component is located in `frontend/src/App.tsx`.
+We modify the content of this file as follows:
+
+.Code of the `frontend/src/App.tsx` component
+[source,javascript]
+----
+import React from 'react';
+import './styles/style.less';
+import DateTime from './components/DateTime';
+
+const Main = () => (
+ <>
+