+ >
+)
+
-export default Main;
\ No newline at end of file
+export default Main;
diff --git a/application-templates/webapp/frontend/src/components/RestTest.tsx b/application-templates/webapp/frontend/src/components/RestTest.tsx
index 3fbaa9333..c1dc7c002 100644
--- a/application-templates/webapp/frontend/src/components/RestTest.tsx
+++ b/application-templates/webapp/frontend/src/components/RestTest.tsx
@@ -1,17 +1,19 @@
-import React, { useState, useEffect } from 'react';
+import { useState, useEffect } from 'react';
-import { TestApi } from '../rest/api'
+import { TestApi } from '../rest/apis/TestApi'
const test = new TestApi();
+
+
const RestTest = () => {
- const [result, setResult] = useState(null);
+ const [result, setResult] = useState(null);
useEffect(() => {
- test.ping().then(r => setResult(r), () => setResult({ data: "API error"}));
+ test.ping().then((r) => setResult(r), () => setResult( "API error"));
}, []);
- return result ?
Backend answered: { result.data }
:
Backend did not answer
+ return result ?
Backend answered: { result }
:
Backend did not answer
}
export default RestTest;
\ No newline at end of file
diff --git a/application-templates/webapp/frontend/src/components/Version.tsx b/application-templates/webapp/frontend/src/components/Version.tsx
new file mode 100644
index 000000000..0cddc79df
--- /dev/null
+++ b/application-templates/webapp/frontend/src/components/Version.tsx
@@ -0,0 +1,19 @@
+import { useState, useEffect } from 'react';
+
+
+
+const Version = () => {
+ const [result, setResult] = useState(null);
+ useEffect(() => {
+ fetch("/proxy/common/api/version", {
+ headers: {
+ 'Accept': 'application/json'
+ }
+ }).then(r => r.json().then(j => setResult(j)), () => setResult("API error"));
+ }, []);
+
+
+ return result ?
Tag: { result?.tag } - Build: {result?.build}
:
Backend did not answer
+}
+
+export default Version;
\ No newline at end of file
diff --git a/application-templates/webapp/frontend/src/index.css b/application-templates/webapp/frontend/src/index.css
new file mode 100644
index 000000000..7466f9054
--- /dev/null
+++ b/application-templates/webapp/frontend/src/index.css
@@ -0,0 +1,5 @@
+body {
+ text-align: center;
+ background-color: '#eeeeee';
+ font-family: Roboto, Helvetica, sans-serif;
+}
\ No newline at end of file
diff --git a/application-templates/webapp/frontend/src/index.ejs b/application-templates/webapp/frontend/src/index.ejs
deleted file mode 100644
index 97d529ad2..000000000
--- a/application-templates/webapp/frontend/src/index.ejs
+++ /dev/null
@@ -1,10 +0,0 @@
-
-
-
-
- __APP_NAME__
-
-
-
-
-
\ No newline at end of file
diff --git a/application-templates/webapp/frontend/src/index.tsx b/application-templates/webapp/frontend/src/index.tsx
deleted file mode 100644
index ae31e4134..000000000
--- a/application-templates/webapp/frontend/src/index.tsx
+++ /dev/null
@@ -1,6 +0,0 @@
-import React from 'react';
-import ReactDOM from 'react-dom';
-
-import App from './App';
-
-ReactDOM.render(<App />, document.getElementById('root'));
diff --git a/application-templates/webapp/frontend/src/styles/style.less b/application-templates/webapp/frontend/src/styles/style.less
deleted file mode 100644
index b1bece6a0..000000000
--- a/application-templates/webapp/frontend/src/styles/style.less
+++ /dev/null
@@ -1,4 +0,0 @@
-body {
- text-align: center;
- background-color: '#eeeeee';
-}
\ No newline at end of file
diff --git a/application-templates/webapp/frontend/src/utils/history.js b/application-templates/webapp/frontend/src/utils/history.js
deleted file mode 100644
index 7bef5c97e..000000000
--- a/application-templates/webapp/frontend/src/utils/history.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import { createBrowserHistory } from "history";
-export default createBrowserHistory();
\ No newline at end of file
diff --git a/application-templates/webapp/frontend/tsconfig.json b/application-templates/webapp/frontend/tsconfig.json
deleted file mode 100644
index 21ec6c41c..000000000
--- a/application-templates/webapp/frontend/tsconfig.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
- "compilerOptions": {
- "outDir": "./dist/",
- "sourceMap": true,
- "noImplicitAny": false,
- "module": "commonjs",
- "target": "es2017",
- "stripInternal":true,
- "skipLibCheck": true,
- "alwaysStrict":false,
- "forceConsistentCasingInFileNames": true,
- "noImplicitReturns": false,
- "strict": false,
- "noUnusedLocals": false,
- "jsx": "react",
- "types": [],
- "esModuleInterop": true
- },
- "exclude": [
- "node_modules"
- ]
-}
\ No newline at end of file
diff --git a/application-templates/webapp/frontend/vite.config.ts b/application-templates/webapp/frontend/vite.config.ts
new file mode 100644
index 000000000..1d1df3944
--- /dev/null
+++ b/application-templates/webapp/frontend/vite.config.ts
@@ -0,0 +1,44 @@
+import { defineConfig, loadEnv } from 'vite'
+import react from '@vitejs/plugin-react'
+
+
+
+// https://vitejs.dev/config/
+export default defineConfig(({ mode }) => {
+ // Load env file based on `mode` in the current working directory.
+ // Set the third parameter to '' to load all env regardless of the `VITE_` prefix.
+ const env = loadEnv(mode, process.cwd(), '')
+
+ const theDomain = env && env.DOMAIN ? env.DOMAIN : 'localhost:5000';
+
+ console.log('Dev server address: ', theDomain);
+
+ const proxyTarget = theDomain;
+ const replaceHost = (uri: string, appName: string) => {
+ const host = (uri.includes("samples.") && uri.replace("samples.", appName + '.')) || uri;
+ console.log('Proxy target: ', host);
+ return host;
+ }
+
+
+ return {
+ plugins: [react()],
+ server: {
+ port: 9000,
+ proxy: {
+ '/api/': {
+ target: replaceHost(proxyTarget, 'samples'),
+ secure: false,
+ changeOrigin: true,
+ },
+ '/proxy/common/api': {
+ target: replaceHost(proxyTarget, 'common'),
+ secure: false,
+ changeOrigin: true,
+ rewrite: (path) => path.replace(/^\/proxy\/common\/api/, '/api')
+ }
+ }
+ }
+ }
+}
+)
diff --git a/application-templates/webapp/frontend/webpack.config.dev.js b/application-templates/webapp/frontend/webpack.config.dev.js
deleted file mode 100644
index 3c6a33ce6..000000000
--- a/application-templates/webapp/frontend/webpack.config.dev.js
+++ /dev/null
@@ -1,48 +0,0 @@
-const { merge } = require('webpack-merge');
-const common = require('./webpack.config.js');
-
-var path = require('path');
-
-const PORT = 9000;
-
-
-module.exports = env => {
-
- const theDomain = env && env.DOMAIN ? env.DOMAIN : 'localhost:5000';
-
- console.log('Dev server address: ', theDomain);
-
- const proxyTarget = theDomain;
- const replaceHost = (uri, appName) => (uri.includes("__APP_NAME__") && uri.replace("__APP_NAME__", appName + '.' + theDomain)) || uri;
- if (!env.port) {
- env.devPort = PORT;
- }
-
-
- const devServer = {
- static: [{
- directory: path.resolve(__dirname, 'dist'),
- publicPath: '/',
- }],
- compress: true,
- https: env.DOMAIN.includes("https"),
- port: Number(env.devPort),
- historyApiFallback: true,
- proxy: {
- '/api/': {
- target: replaceHost( proxyTarget, '__APP_NAME__'),
- secure: false,
- changeOrigin: true,
- }
- },
- };
-
- return merge(
- common(env),
- {
- mode: 'development',
- devtool: 'source-map',
- devServer,
- }
- )
-};
diff --git a/application-templates/webapp/frontend/webpack.config.js b/application-templates/webapp/frontend/webpack.config.js
deleted file mode 100644
index ad5ee5566..000000000
--- a/application-templates/webapp/frontend/webpack.config.js
+++ /dev/null
@@ -1,124 +0,0 @@
-const path = require("path");
-const HtmlWebpackPlugin = require("html-webpack-plugin");
-const CompressionPlugin = require("compression-webpack-plugin");
-const { CleanWebpackPlugin } = require("clean-webpack-plugin");
-const CopyWebpackPlugin = require("copy-webpack-plugin");
-
-const copyPaths = [
- { from: path.resolve(__dirname, "src/assets"), to: "assets" },
-];
-
-module.exports = function webpacking(envVariables) {
- let env = envVariables;
- if (!env) {
- env = {};
- }
- if (!env.mode) {
- env.mode = "production";
- }
-
- console.log("####################");
- console.log("####################");
- console.log("BUILD bundle with parameters:");
- console.log(env);
- console.log("####################");
- console.log("####################");
-
- const { mode } = env;
- const devtool = "source-map";
-
- const output = {
- path: path.resolve(__dirname, "dist"),
- filename: "[name].[contenthash].js",
- publicPath: "/"
- };
-
- const module = {
- rules: [
- {
- test: /\.(js|jsx)$/,
- exclude: /node_modules/,
- loader: "babel-loader",
- },
- {
- test: /\.ts(x?)$/,
- include: path.resolve(__dirname, 'src'),
- use: [
- {
- loader: "ts-loader",
- options: {
- transpileOnly: true,
- },
- }
- ]
- },
- {
- test: /\.(css)$/,
- use: [
- {
- loader: "style-loader",
- },
- {
- loader: "css-loader",
- },
- ],
- },
- {
- test: /\.less$/,
- use: [
- {
- loader: "style-loader",
- },
- {
- loader: "css-loader",
- },
- {
- loader: "less-loader",
- options: {
- lessOptions: {
- strictMath: true,
- },
- },
- },
- ],
- },
- {
- test: /\.(png|jpg|gif|eot|woff|woff2|svg|ttf)$/,
- use: [
- "file-loader",
- {
- loader: "image-webpack-loader",
- options: {
- bypassOnDebug: true, // webpack@1.x
- disable: true, // webpack@2.x and newer
- },
- },
- ],
- },
- ],
- };
-
- const resolve = {
- extensions: ["*", ".js", ".json", ".ts", ".tsx", ".jsx"],
- symlinks: false,
- };
-
- const plugins = [
- new CleanWebpackPlugin(),
- new CopyWebpackPlugin({ patterns: copyPaths }),
- new CompressionPlugin(),
- new HtmlWebpackPlugin({
- template: "src/index.ejs",
- favicon: path.join(__dirname, "src/assets/icon.png"),
- }),
- ];
-
- return {
- mode,
- devtool,
- output,
- module,
- resolve,
- plugins,
- };
-};
diff --git a/applications/accounts/Dockerfile b/applications/accounts/Dockerfile
index c14b452fc..4e61bef42 100644
--- a/applications/accounts/Dockerfile
+++ b/applications/accounts/Dockerfile
@@ -1,4 +1,4 @@
-FROM quay.io/keycloak/keycloak:16.1.0
+FROM quay.io/keycloak/keycloak:16.1.1
# add kubectl
USER root
diff --git a/applications/common/server/common/__main__.py b/applications/common/server/common/__main__.py
index 4bd6ef172..0a81a3f80 100644
--- a/applications/common/server/common/__main__.py
+++ b/applications/common/server/common/__main__.py
@@ -7,13 +7,13 @@
from common.controllers.sentry_controller import global_dsn
-
def init_fn(app):
log.info("initializing database from app")
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
if not global_dsn:
open_db(app)
+
app = init_flask(init_app_fn=init_fn)
if __name__ == '__main__':
diff --git a/applications/common/server/common/config.py b/applications/common/server/common/config.py
index 23d0636f3..6e29d747a 100644
--- a/applications/common/server/common/config.py
+++ b/applications/common/server/common/config.py
@@ -22,7 +22,6 @@ class Config(object):
log.error("Cannot configure SENTRY")
-
class ProductionConfig(Config):
DEBUG = False
diff --git a/applications/common/server/common/controllers/accounts_controller.py b/applications/common/server/common/controllers/accounts_controller.py
index 90202ef36..b09613be9 100644
--- a/applications/common/server/common/controllers/accounts_controller.py
+++ b/applications/common/server/common/controllers/accounts_controller.py
@@ -3,6 +3,7 @@
from cloudharness import applications
from cloudharness.utils.config import CloudharnessConfig
+
def get_config(): # noqa: E501
"""
Gets the config for logging in into accounts
diff --git a/applications/common/server/common/controllers/config_controller.py b/applications/common/server/common/controllers/config_controller.py
index 169a04788..72f4fdd38 100644
--- a/applications/common/server/common/controllers/config_controller.py
+++ b/applications/common/server/common/controllers/config_controller.py
@@ -11,6 +11,7 @@
from cloudharness.utils.config import CloudharnessConfig
from cloudharness_model.models import HarnessMainConfig
+
def get_version(): # noqa: E501
"""get_version
diff --git a/applications/common/server/common/controllers/security_controller_.py b/applications/common/server/common/controllers/security_controller_.py
index ecac40558..7d686cba7 100644
--- a/applications/common/server/common/controllers/security_controller_.py
+++ b/applications/common/server/common/controllers/security_controller_.py
@@ -1,3 +1 @@
from typing import List
-
-
diff --git a/applications/common/server/common/controllers/sentry_controller.py b/applications/common/server/common/controllers/sentry_controller.py
index 1daee66fa..4d0727825 100644
--- a/applications/common/server/common/controllers/sentry_controller.py
+++ b/applications/common/server/common/controllers/sentry_controller.py
@@ -13,6 +13,7 @@
except:
global_dsn = None
+
def getdsn(appname): # noqa: E501
"""
Gets the Sentry DSN for a given application or returns the global dsn when set
diff --git a/applications/common/server/common/models.py b/applications/common/server/common/models.py
index 8817fa654..843e9d20d 100644
--- a/applications/common/server/common/models.py
+++ b/applications/common/server/common/models.py
@@ -16,4 +16,4 @@ def __init__(self, url, result_all, result_no_stop_words):
self.result_no_stop_words = result_no_stop_words
def __repr__(self):
- return ''.format(self.id)
\ No newline at end of file
+ return ''.format(self.id)
diff --git a/applications/common/server/common/repository/sentry.py b/applications/common/server/common/repository/sentry.py
index f53349951..4ad449ac0 100644
--- a/applications/common/server/common/repository/sentry.py
+++ b/applications/common/server/common/repository/sentry.py
@@ -1,14 +1,16 @@
-import sqlalchemy
+import sqlalchemy
from sqlalchemy.sql import text
from cloudharness.utils.env import get_service_public_address
from .db import get_db
+
class SentryProjectNotFound(Exception):
pass
+
def _get_api_token():
# ToDo: may be we can use here a dynamic token, but for now let's use a hard coded one
api_token = 'afe75d802007405dbc0c2fb1db4cc8b06b981017f58944d0afac700f743ee06a'
@@ -16,31 +18,33 @@ def _get_api_token():
select token from sentry_apitoken
where token=:api_token
''')
- token = get_db().engine.execute(s,
- api_token=api_token
- ).fetchall()
+ token = get_db().engine.execute(s,
+ api_token=api_token
+ ).fetchall()
if len(token) == 0:
# token is not present in the Sentry database, let's create it
s = text('''
insert into sentry_apitoken(user_id, token, scopes, date_added, scope_list)
values (1, :api_token, 0, now(), :scope_list)
''')
- get_db().engine.execute(s,
- api_token=api_token,
- scope_list='{event:admin,event:read,'
- 'member:read,member:admin,'
- 'project:read,project:releases,project:admin,project:write,'
- 'team:read,team:write,team:admin,'
- 'org:read,org:write,org:admin}'
- )
+ get_db().engine.execute(s,
+ api_token=api_token,
+ scope_list='{event:admin,event:read,'
+ 'member:read,member:admin,'
+ 'project:read,project:releases,project:admin,project:write,'
+ 'team:read,team:write,team:admin,'
+ 'org:read,org:write,org:admin}'
+ )
return _get_api_token()
else:
# return the first column from the first row of the query result
return token[0][0]
+
def get_token():
return _get_api_token()
+
def get_dsn(appname):
s = text('''
select public_key, p.id
@@ -49,9 +53,9 @@ def get_dsn(appname):
where p.slug=:project_slug
''')
try:
- public_key = get_db().engine.execute(s,
- project_slug=appname
- ).fetchall()
+ public_key = get_db().engine.execute(s,
+ project_slug=appname
+ ).fetchall()
except sqlalchemy.exc.OperationalError:
raise SentryProjectNotFound('Sentry is not initialized.')
diff --git a/applications/common/server/common/test/test_sentry_controller.py b/applications/common/server/common/test/test_sentry_controller.py
index a01d90c91..6f881ad0d 100644
--- a/applications/common/server/common/test/test_sentry_controller.py
+++ b/applications/common/server/common/test/test_sentry_controller.py
@@ -17,7 +17,7 @@ def test_getdsn(self):
Gets the Sentry DSN for a given application
"""
- headers = {
+ headers = {
'Accept': 'application/json',
}
response = self.client.open(
diff --git a/applications/common/server/common/util.py b/applications/common/server/common/util.py
index fcaed08fa..8e7e71e62 100644
--- a/applications/common/server/common/util.py
+++ b/applications/common/server/common/util.py
@@ -68,8 +68,8 @@ def deserialize_date(string):
:rtype: date
"""
if string is None:
- return None
-
+ return None
+
try:
from dateutil.parser import parse
return parse(string).date()
@@ -88,8 +88,8 @@ def deserialize_datetime(string):
:rtype: datetime
"""
if string is None:
- return None
-
+ return None
+
try:
from dateutil.parser import parse
return parse(string)
diff --git a/applications/common/server/setup.py b/applications/common/server/setup.py
index 007ca859b..1935137a4 100644
--- a/applications/common/server/setup.py
+++ b/applications/common/server/setup.py
@@ -4,7 +4,7 @@
from setuptools import setup, find_packages
NAME = "openapi_server"
-VERSION = "2.3.0"
+VERSION = "2.4.0"
# To install the library, run the following
#
@@ -38,4 +38,3 @@
Cloud Harness Platform - Reference CH service API
"""
)
-
diff --git a/applications/common/server/test-requirements.txt b/applications/common/server/test-requirements.txt
index f8f951d74..051bf8404 100644
--- a/applications/common/server/test-requirements.txt
+++ b/applications/common/server/test-requirements.txt
@@ -5,4 +5,4 @@ Flask-Testing==0.8.0
psycopg2-binary==2.8.5
Flask-SQLAlchemy==2.4.3
SQLAlchemy==1.3.17
-requests==2.21.0
+requests>=2.21.0
diff --git a/applications/events/deploy/templates-compose/deployments.yaml b/applications/events/deploy/templates-compose/deployments.yaml
new file mode 100644
index 000000000..0001fde48
--- /dev/null
+++ b/applications/events/deploy/templates-compose/deployments.yaml
@@ -0,0 +1,97 @@
+{{- define "events.deployment" }}
+events:
+ networks:
+ - ch
+ image: solsson/kafka:2.3.0@sha256:b59603a8c0645f792fb54e9571500e975206352a021d6a116b110945ca6c3a1d
+ ports:
+ - "9094:9092"
+ expose:
+ - 5555
+ - 9094
+ - 9092
+ environment:
+ - CLASSPATH=/opt/kafka/libs/extensions/*
+ - KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:/etc/kafka/log4j.properties
+ - JMX_PORT=5555
+ command:
+ - "./bin/kafka-server-start.sh"
+ - "/etc/kafka/server.properties"
+ - "--override"
+ - "default.replication.factor=1"
+ - "--override"
+ - "min.insync.replicas=1"
+ - "--override"
+ - "offsets.topic.replication.factor=1"
+ - "--override"
+ - "offsets.topic.num.partitions=1"
+ depends_on:
+ events-kafka-init:
+ condition: service_completed_successfully
+
+events-kafka-init:
+ networks:
+ - ch
+ image: solsson/kafka-initutils@sha256:f6d9850c6c3ad5ecc35e717308fddb47daffbde18eb93e98e031128fe8b899ef
+ command:
+ - "/bin/bash"
+ - "/etc/kafka-configmap/init.sh"
+ environment:
+
+pzoo:
+ networks:
+ - ch
+ expose:
+ - 2181
+ - 2888
+ - 3888
+ image: solsson/kafka:2.3.0@sha256:b59603a8c0645f792fb54e9571500e975206352a021d6a116b110945ca6c3a1d
+ command:
+ - "./bin/zookeeper-server-start.sh"
+ - "/etc/kafka/zookeeper.properties"
+ environment:
+ - KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:/etc/kafka/log4j.properties
+ depends_on:
+ events-pzoo-init:
+ condition: service_completed_successfully
+
+events-pzoo-init:
+ networks:
+ - ch
+ expose:
+ image: solsson/kafka-initutils@sha256:f6d9850c6c3ad5ecc35e717308fddb47daffbde18eb93e98e031128fe8b899ef
+ command:
+ - "/bin/bash"
+ - "/etc/kafka-configmap/init.sh"
+ environment:
+ - PZOO_REPLICAS=1
+ - ZOO_REPLICAS=0
+
+zoo:
+ networks:
+ - ch
+ expose:
+ - 2181
+ - 2888
+ - 3888
+ image: solsson/kafka:2.3.0@sha256:b59603a8c0645f792fb54e9571500e975206352a021d6a116b110945ca6c3a1d
+ command:
+ - "./bin/zookeeper-server-start.sh"
+ - "/etc/kafka/zookeeper.properties"
+ environment:
+ - KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:/etc/kafka/log4j.properties
+ depends_on:
+ events-zoo-init:
+ condition: service_completed_successfully
+
+events-zoo-init:
+ networks:
+ - ch
+ image: solsson/kafka-initutils@sha256:f6d9850c6c3ad5ecc35e717308fddb47daffbde18eb93e98e031128fe8b899ef
+ command:
+ - "/bin/bash"
+ - "/etc/kafka-configmap/init.sh"
+ environment:
+ - PZOO_REPLICAS=1
+ - ZOO_REPLICAS=0
+ - ID_OFFSET=2
+{{- end }}
\ No newline at end of file
diff --git a/applications/jupyterhub/Dockerfile b/applications/jupyterhub/Dockerfile
index 8b279adc0..907ce6725 100755
--- a/applications/jupyterhub/Dockerfile
+++ b/applications/jupyterhub/Dockerfile
@@ -1,31 +1,39 @@
ARG CLOUDHARNESS_BASE
FROM $CLOUDHARNESS_BASE as base
-FROM jupyterhub/k8s-hub:1.1.3
+FROM quay.io/jupyterhub/k8s-hub:3.2.1
USER root
COPY --from=base libraries/models/requirements.txt /libraries/models/requirements.txt
-RUN pip install -r /libraries/models/requirements.txt
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install -r /libraries/models/requirements.txt
COPY --from=base libraries/cloudharness-common/requirements.txt /libraries/cloudharness-common/requirements.txt
-RUN pip install -r /libraries/cloudharness-common/requirements.txt
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install -r /libraries/cloudharness-common/requirements.txt
COPY --from=base libraries/client/cloudharness_cli/requirements.txt /libraries/client/cloudharness_cli/requirements.txt
-RUN pip install -r /libraries/client/cloudharness_cli/requirements.txt
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install -r /libraries/client/cloudharness_cli/requirements.txt
COPY --from=base libraries/models /libraries/models
-RUN pip install -e /libraries/models
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install -e /libraries/models
COPY --from=base libraries/cloudharness-common /libraries/cloudharness-common
COPY --from=base libraries/client/cloudharness_cli /libraries/client/cloudharness_cli
#
-RUN pip install -e /libraries/cloudharness-common
-RUN pip install -e /libraries/client/cloudharness_cli
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install -e /libraries/cloudharness-common
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install -e /libraries/client/cloudharness_cli
COPY src src
-RUN pip install ./src/harness_jupyter
-RUN pip install ./src/chauthenticator
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install ./src/harness_jupyter
+RUN --mount=type=cache,target=/root/.cache python -m pip install --upgrade pip &&\
+ pip install ./src/chauthenticator
USER jovyan
diff --git a/applications/jupyterhub/README.md b/applications/jupyterhub/README.md
index d961d0347..9ad78d2fd 100755
--- a/applications/jupyterhub/README.md
+++ b/applications/jupyterhub/README.md
@@ -31,3 +31,14 @@ To support the pre pulling of task images see (https://github.com/MetaCell/cloud
the template `templates/image-puller/_helpers-daemonset.tpl` has been changed (see line 167 and on)
TODO: remember to implement/revise this code after you have updated/changed the templates of JupyterHub
+
+## How to update
+
+The helm chart is based on the [zero-to-jupyterhub](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/) helm chart.
+
+1. Run update.sh [TAG] # Do not use latest!
+2. Restore from the diff files with EDIT: CLOUDHARNESS. Use update.patch as a reference
+3. Update Dockerfile to use the same base image you see on values.yaml: hub/image
+
+Customize notebook image: quay.io/jupyterhub/k8s-singleuser-sample:[TAG]
+
diff --git a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
index d4b3cee2b..c3318bac7 100755
--- a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
+++ b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
@@ -1,58 +1,39 @@
+# load the config object (satisfies linters)
+
+c = get_config() # noqa
+
+import glob
import os
import re
import sys
-import logging
-from tornado.httpclient import AsyncHTTPClient
-from kubernetes import client
from jupyterhub.utils import url_path_join
+from kubernetes_asyncio import client
+from tornado.httpclient import AsyncHTTPClient
+
+# CLOUDHARNESS: EDIT START
+import logging
try:
from harness_jupyter.jupyterhub import harness_hub
- harness_hub() # activates harness hooks on jupyterhub
+ harness_hub() # activates harness hooks on jupyterhub
except Exception as e:
logging.error("could not import harness_jupyter", exc_info=True)
-
+# CLOUDHARNESS: EDIT END
# Make sure that modules placed in the same directory as the jupyterhub config are added to the pythonpath
configuration_directory = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, configuration_directory)
-from z2jh import (
+from z2jh import ( # noqa
get_config,
- set_config_if_not_none,
get_name,
get_name_env,
get_secret_value,
+ set_config_if_not_none,
)
-print('Base url is', c.JupyterHub.get('base_url', '/'))
-
-# Configure JupyterHub to use the curl backend for making HTTP requests,
-# rather than the pure-python implementations. The default one starts
-# being too slow to make a large number of requests to the proxy API
-# at the rate required.
-AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
-
-c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'
-
-# Connect to a proxy running in a different pod
-c.ConfigurableHTTPProxy.api_url = 'http://{}:{}'.format(os.environ['PROXY_API_SERVICE_HOST'], int(os.environ['PROXY_API_SERVICE_PORT']))
-c.ConfigurableHTTPProxy.should_start = False
-
-# Do not shut down user pods when hub is restarted
-c.JupyterHub.cleanup_servers = False
-
-# Check that the proxy has routes appropriately setup
-c.JupyterHub.last_activity_interval = 60
-
-# Don't wait at all before redirecting a spawning user to the progress page
-c.JupyterHub.tornado_settings = {
- 'slow_spawn_timeout': 0,
-}
-
-
def camelCaseify(s):
"""convert snake_case to camelCase
@@ -173,6 +154,7 @@ def camelCaseify(s):
("events_enabled", "events"),
("extra_labels", None),
("extra_annotations", None),
+ # ("allow_privilege_escalation", None), # Managed manually below
("uid", None),
("fs_gid", None),
("service_account", "serviceAccountName"),
@@ -206,10 +188,19 @@ def camelCaseify(s):
if image:
tag = get_config("singleuser.image.tag")
if tag:
- image = "{}:{}".format(image, tag)
+ image = f"{image}:{tag}"
c.KubeSpawner.image = image
+# allow_privilege_escalation defaults to False in KubeSpawner 2+. Since its a
+# property where None, False, and True all are valid values that users of the
+# Helm chart may want to set, we can't use the set_config_if_not_none helper
+# function as someone may want to override the default False value to None.
+#
+c.KubeSpawner.allow_privilege_escalation = get_config(
+ "singleuser.allowPrivilegeEscalation"
+)
+
# Combine imagePullSecret.create (single), imagePullSecrets (list), and
# singleuser.image.pullSecrets (list).
image_pull_secrets = []
@@ -255,7 +246,7 @@ def camelCaseify(s):
pass
else:
raise ValueError(
- "Unrecognized value for matchNodePurpose: %r" % match_node_purpose
+ f"Unrecognized value for matchNodePurpose: {match_node_purpose}"
)
# Combine the common tolerations for user pods with singleuser tolerations
@@ -271,7 +262,7 @@ def camelCaseify(s):
pvc_name_template = get_config("singleuser.storage.dynamic.pvcNameTemplate")
c.KubeSpawner.pvc_name_template = pvc_name_template
volume_name_template = get_config("singleuser.storage.dynamic.volumeNameTemplate")
- c.KubeSpawner.storage_pvc_ensure = False
+ c.KubeSpawner.storage_pvc_ensure = True
set_config_if_not_none(
c.KubeSpawner, "storage_class", "singleuser.storage.dynamic.storageClass"
)
@@ -354,41 +345,62 @@ def camelCaseify(s):
)
c.JupyterHub.services = []
+c.JupyterHub.load_roles = []
+# jupyterhub-idle-culler's permissions are scoped to what it needs only, see
+# https://github.com/jupyterhub/jupyterhub-idle-culler#permissions.
+#
if get_config("cull.enabled", False):
+ jupyterhub_idle_culler_role = {
+ "name": "jupyterhub-idle-culler",
+ "scopes": [
+ "list:users",
+ "read:users:activity",
+ "read:servers",
+ "delete:servers",
+ # "admin:users", # dynamically added if --cull-users is passed
+ ],
+ # assign the role to a jupyterhub service, so it gains these permissions
+ "services": ["jupyterhub-idle-culler"],
+ }
+
cull_cmd = ["python3", "-m", "jupyterhub_idle_culler"]
base_url = c.JupyterHub.get("base_url", "/")
cull_cmd.append("--url=http://localhost:8081" + url_path_join(base_url, "hub/api"))
cull_timeout = get_config("cull.timeout")
if cull_timeout:
- cull_cmd.append("--timeout=%s" % cull_timeout)
+ cull_cmd.append(f"--timeout={cull_timeout}")
cull_every = get_config("cull.every")
if cull_every:
- cull_cmd.append("--cull-every=%s" % cull_every)
+ cull_cmd.append(f"--cull-every={cull_every}")
cull_concurrency = get_config("cull.concurrency")
if cull_concurrency:
- cull_cmd.append("--concurrency=%s" % cull_concurrency)
+ cull_cmd.append(f"--concurrency={cull_concurrency}")
if get_config("cull.users"):
cull_cmd.append("--cull-users")
+ jupyterhub_idle_culler_role["scopes"].append("admin:users")
+
+ if not get_config("cull.adminUsers"):
+ cull_cmd.append("--cull-admin-users=false")
if get_config("cull.removeNamedServers"):
cull_cmd.append("--remove-named-servers")
cull_max_age = get_config("cull.maxAge")
if cull_max_age:
- cull_cmd.append("--max-age=%s" % cull_max_age)
+ cull_cmd.append(f"--max-age={cull_max_age}")
c.JupyterHub.services.append(
{
- "name": "cull-idle",
- "admin": True,
+ "name": "jupyterhub-idle-culler",
"command": cull_cmd,
}
)
+ c.JupyterHub.load_roles.append(jupyterhub_idle_culler_role)
for key, service in get_config("hub.services", {}).items():
# c.JupyterHub.services is a list of dicts, but
@@ -402,26 +414,44 @@ def camelCaseify(s):
c.JupyterHub.services.append(service)
+for key, role in get_config("hub.loadRoles", {}).items():
+ # c.JupyterHub.load_roles is a list of dicts, but
+ # hub.loadRoles is a dict of dicts to make the config mergable
+ role.setdefault("name", key)
+
+ c.JupyterHub.load_roles.append(role)
+
+# respect explicit null command (distinct from unspecified)
+# this avoids relying on KubeSpawner.cmd's default being None
+_unspecified = object()
+specified_cmd = get_config("singleuser.cmd", _unspecified)
+if specified_cmd is not _unspecified:
+ c.Spawner.cmd = specified_cmd
-set_config_if_not_none(c.Spawner, "cmd", "singleuser.cmd")
set_config_if_not_none(c.Spawner, "default_url", "singleuser.defaultUrl")
-cloud_metadata = get_config("singleuser.cloudMetadata", {})
+cloud_metadata = get_config("singleuser.cloudMetadata")
if cloud_metadata.get("blockWithIptables") == True:
# Use iptables to block access to cloud metadata by default
network_tools_image_name = get_config("singleuser.networkTools.image.name")
network_tools_image_tag = get_config("singleuser.networkTools.image.tag")
+ network_tools_resources = get_config("singleuser.networkTools.resources")
+ ip = cloud_metadata["ip"]
ip_block_container = client.V1Container(
name="block-cloud-metadata",
image=f"{network_tools_image_name}:{network_tools_image_tag}",
command=[
"iptables",
- "-A",
+ "--append",
"OUTPUT",
- "-d",
- cloud_metadata.get("ip", "169.254.169.254"),
- "-j",
+ "--protocol",
+ "tcp",
+ "--destination",
+ ip,
+ "--destination-port",
+ "80",
+ "--jump",
"DROP",
],
security_context=client.V1SecurityContext(
@@ -429,6 +459,7 @@ def camelCaseify(s):
run_as_user=0,
capabilities=client.V1Capabilities(add=["NET_ADMIN"]),
),
+ resources=network_tools_resources,
)
c.KubeSpawner.init_containers.append(ip_block_container)
@@ -438,17 +469,6 @@ def camelCaseify(s):
c.JupyterHub.log_level = "DEBUG"
c.Spawner.debug = True
-# load /usr/local/etc/jupyterhub/jupyterhub_config.d config files
-config_dir = "/usr/local/etc/jupyterhub/jupyterhub_config.d"
-if os.path.isdir(config_dir):
- for file_path in sorted(glob.glob(f"{config_dir}/*.py")):
- file_name = os.path.basename(file_path)
- print(f"Loading {config_dir} config: {file_name}")
- with open(file_path) as f:
- file_content = f.read()
- # compiling makes debugging easier: https://stackoverflow.com/a/437857
- exec(compile(source=file_content, filename=file_name, mode="exec"))
-
# load potentially seeded secrets
#
# NOTE: ConfigurableHTTPProxy.auth_token is set through an environment variable
@@ -471,19 +491,32 @@ def camelCaseify(s):
cfg.pop("keys", None)
c[app].update(cfg)
+
+# load /usr/local/etc/jupyterhub/jupyterhub_config.d config files
+config_dir = "/usr/local/etc/jupyterhub/jupyterhub_config.d"
+if os.path.isdir(config_dir):
+ for file_path in sorted(glob.glob(f"{config_dir}/*.py")):
+ file_name = os.path.basename(file_path)
+ print(f"Loading {config_dir} config: {file_name}")
+ with open(file_path) as f:
+ file_content = f.read()
+ # compiling makes debugging easier: https://stackoverflow.com/a/437857
+ exec(compile(source=file_content, filename=file_name, mode="exec"))
+
# execute hub.extraConfig entries
for key, config_py in sorted(get_config("hub.extraConfig", {}).items()):
- print("Loading extra config: %s" % key)
+ print(f"Loading extra config: {key}")
exec(config_py)
+# CLOUDHARNESS: EDIT START
# Allow switching authenticators easily
auth_type = get_config('hub.config.JupyterHub.authenticator_class')
email_domain = 'local'
common_oauth_traits = (
- ('client_id', None),
- ('client_secret', None),
- ('oauth_callback_url', 'callbackUrl'),
+ ('client_id', None),
+ ('client_secret', None),
+ ('oauth_callback_url', 'callbackUrl'),
)
print("Auth type", auth_type)
if auth_type == 'ch':
@@ -504,6 +537,7 @@ def camelCaseify(s):
c.Authenticator.auto_login = True
c.OAuthenticator.client_id = client_id
c.OAuthenticator.client_secret = client_secret
+ c.OAuthenticator.allow_all = True
c.GenericOAuthenticator.login_service = "CH"
c.GenericOAuthenticator.username_key = "email"
@@ -525,4 +559,5 @@ def camelCaseify(s):
c.apps = get_config('apps')
c.registry = get_config('registry')
c.domain = get_config('root.domain')
-c.namespace = get_config('root.namespace')
\ No newline at end of file
+c.namespace = get_config('root.namespace')
+# CLOUDHARNESS: EDIT END
diff --git a/applications/jupyterhub/deploy/resources/hub/z2jh.py b/applications/jupyterhub/deploy/resources/hub/z2jh.py
index 834a6b6c8..24bba5552 100755
--- a/applications/jupyterhub/deploy/resources/hub/z2jh.py
+++ b/applications/jupyterhub/deploy/resources/hub/z2jh.py
@@ -3,15 +3,15 @@
Methods here can be imported by extraConfig in values.yaml
"""
-from collections import Mapping
-from functools import lru_cache
import os
-import re
+from collections.abc import Mapping
+from functools import lru_cache
import yaml
+
# memoize so we only load config once
-@lru_cache()
+@lru_cache
def _load_config():
"""Load the Helm chart configuration used to render the Helm templates of
the chart from a mounted k8s Secret, and merge in values from an optionally
@@ -27,6 +27,7 @@ def _load_config():
cfg = _merge_dictionaries(cfg, values)
else:
print(f"No config at {path}")
+ # EDIT: CLOUDHARNESS START
path = f"/opt/cloudharness/resources/allvalues.yaml"
if os.path.exists(path):
print("Loading global CloudHarness config at", path)
@@ -34,11 +35,11 @@ def _load_config():
values = yaml.safe_load(f)
cfg = _merge_dictionaries(cfg, values)
cfg['root'] = values
-
+ # EDIT: CLOUDHARNESS END
return cfg
-@lru_cache()
+@lru_cache
def _get_config_value(key):
"""Load value from the k8s ConfigMap given a key."""
@@ -50,7 +51,7 @@ def _get_config_value(key):
raise Exception(f"{path} not found!")
-@lru_cache()
+@lru_cache
def get_secret_value(key, default="never-explicitly-set"):
"""Load value from the user managed k8s Secret or the default k8s Secret
given a key."""
@@ -117,17 +118,19 @@ def get_config(key, default=None):
else:
value = value[level]
-
+ # EDIT: CLOUDHARNESS START
+ import re
if value and isinstance(value, str):
- replace_var = re.search("{{.*?}}", value)
+ replace_var = re.search("{{.*?}}", value)
if replace_var:
variable = replace_var.group(0)[2:-2].strip()
repl = get_config(variable)
-
+
if repl:
print("replace", variable, "in", value, ":", repl)
value = re.sub("{{.*?}}", repl, value)
+ # EDIT: CLOUDHARNESS END
return value
@@ -137,6 +140,5 @@ def set_config_if_not_none(cparent, name, key):
configuration item if not None
"""
data = get_config(key)
-
if data is not None:
- setattr(cparent, name, data)
\ No newline at end of file
+ setattr(cparent, name, data)
diff --git a/applications/jupyterhub/deploy/templates/NOTES.txt b/applications/jupyterhub/deploy/templates/NOTES.txt
new file mode 100644
index 000000000..9769a9c72
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/NOTES.txt
@@ -0,0 +1,158 @@
+{{- $proxy_service := include "jupyterhub.proxy-public.fullname" . -}}
+
+{{- /* Generated with https://patorjk.com/software/taag/#p=display&h=0&f=Slant&t=JupyterHub */}}
+. __ __ __ __ __
+ / / __ __ ____ __ __ / /_ ___ _____ / / / / __ __ / /_
+ __ / / / / / / / __ \ / / / / / __/ / _ \ / ___/ / /_/ / / / / / / __ \
+/ /_/ / / /_/ / / /_/ / / /_/ / / /_ / __/ / / / __ / / /_/ / / /_/ /
+\____/ \__,_/ / .___/ \__, / \__/ \___/ /_/ /_/ /_/ \__,_/ /_.___/
+ /_/ /____/
+
+ You have successfully installed the official JupyterHub Helm chart!
+
+### Installation info
+
+ - Kubernetes namespace: {{ .Release.Namespace }}
+ - Helm release name: {{ .Release.Name }}
+ - Helm chart version: {{ .Chart.Version }}
+ - JupyterHub version: {{ .Chart.AppVersion }}
+ - Hub pod packages: See https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/{{ include "jupyterhub.chart-version-to-git-ref" .Chart.Version }}/images/hub/requirements.txt
+
+### Followup links
+
+ - Documentation: https://z2jh.jupyter.org
+ - Help forum: https://discourse.jupyter.org
+ - Social chat: https://gitter.im/jupyterhub/jupyterhub
+ - Issue tracking: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues
+
+### Post-installation checklist
+
+ - Verify that created Pods enter a Running state:
+
+ kubectl --namespace={{ .Release.Namespace }} get pod
+
+ If a pod is stuck with a Pending or ContainerCreating status, diagnose with:
+
+ kubectl --namespace={{ .Release.Namespace }} describe pod
+
+ If a pod keeps restarting, diagnose with:
+
+ kubectl --namespace={{ .Release.Namespace }} logs --previous
+ {{- println }}
+
+ {{- if eq .Values.apps.jupyterhub.proxy.service.type "LoadBalancer" }}
+ - Verify an external IP is provided for the k8s Service {{ $proxy_service }}.
+
+ kubectl --namespace={{ .Release.Namespace }} get service {{ $proxy_service }}
+
+ If the external ip remains , diagnose with:
+
+ kubectl --namespace={{ .Release.Namespace }} describe service {{ $proxy_service }}
+ {{- end }}
+
+ - Verify web based access:
+ {{- println }}
+ {{- if .Values.apps.jupyterhub.ingress.enabled }}
+ {{- range $host := .Values.apps.jupyterhub.ingress.hosts }}
+ Try insecure HTTP access: http://{{ $host }}{{ $.Values.apps.jupyterhub.hub.baseUrl | trimSuffix "/" }}/
+ {{- end }}
+
+ {{- range $tls := .Values.apps.jupyterhub.ingress.tls }}
+ {{- range $host := $tls.hosts }}
+ Try secure HTTPS access: https://{{ $host }}{{ $.Values.apps.jupyterhub.hub.baseUrl | trimSuffix "/" }}/
+ {{- end }}
+ {{- end }}
+ {{- else }}
+ You have not configured a k8s Ingress resource so you need to access the k8s
+ Service {{ $proxy_service }} directly.
+ {{- println }}
+
+ {{- if eq .Values.apps.jupyterhub.proxy.service.type "NodePort" }}
+ The k8s Service {{ $proxy_service }} is exposed via NodePorts. That means
+ that all the k8s cluster's nodes are exposing the k8s Service via those
+ ports.
+
+ Try insecure HTTP access: http://:{{ .Values.apps.jupyterhub.proxy.service.nodePorts.http | default "no-http-nodeport-set"}}
+ Try secure HTTPS access: https://:{{ .Values.apps.jupyterhub.proxy.service.nodePorts.https | default "no-https-nodeport-set" }}
+
+ {{- else }}
+ If your computer is outside the k8s cluster, you can port-forward traffic to
+ the k8s Service {{ $proxy_service }} with kubectl to access it from your
+ computer.
+
+ kubectl --namespace={{ .Release.Namespace }} port-forward service/{{ $proxy_service }} 8080:http
+
+ Try insecure HTTP access: http://localhost:8080
+ {{- end }}
+ {{- end }}
+ {{- println }}
+
+
+
+
+
+{{- /*
+ Warnings for likely misconfigurations
+*/}}
+
+{{- if and (not .Values.apps.jupyterhub.scheduling.podPriority.enabled) (and .Values.apps.jupyterhub.scheduling.userPlaceholder.enabled .Values.apps.jupyterhub.scheduling.userPlaceholder.replicas) }}
+#################################################################################
+###### WARNING: You are using user placeholders without pod priority #####
+###### enabled*, either enable pod priority or stop using the #####
+###### user placeholders** to avoid having placeholders that #####
+###### refuse to make room for a real user. #####
+###### #####
+###### *scheduling.podPriority.enabled #####
+###### **scheduling.userPlaceholder.enabled #####
+###### **scheduling.userPlaceholder.replicas #####
+#################################################################################
+{{- println }}
+{{- end }}
+
+
+
+
+
+{{- /*
+ Breaking changes and failures for likely misconfigurations.
+*/}}
+
+{{- $breaking := "" }}
+{{- $breaking_title := "\n" }}
+{{- $breaking_title = print $breaking_title "\n#################################################################################" }}
+{{- $breaking_title = print $breaking_title "\n###### BREAKING: The config values passed contained no longer accepted #####" }}
+{{- $breaking_title = print $breaking_title "\n###### options. See the messages below for more details. #####" }}
+{{- $breaking_title = print $breaking_title "\n###### #####" }}
+{{- $breaking_title = print $breaking_title "\n###### To verify your updated config is accepted, you can use #####" }}
+{{- $breaking_title = print $breaking_title "\n###### the `helm template` command. #####" }}
+{{- $breaking_title = print $breaking_title "\n#################################################################################" }}
+
+
+{{- /*
+ This is an example (in a helm template comment) on how to detect and
+ communicate with regards to a breaking chart config change.
+
+ {{- if hasKey .Values.apps.jupyterhub.singleuser.cloudMetadata "enabled" }}
+ {{- $breaking = print $breaking "\n\nCHANGED: singleuser.cloudMetadata.enabled must as of 1.0.0 be configured using singleuser.cloudMetadata.blockWithIptables with the opposite value." }}
+ {{- end }}
+*/}}
+
+
+{{- if hasKey .Values.apps.jupyterhub.rbac "enabled" }}
+{{- $breaking = print $breaking "\n\nCHANGED: rbac.enabled must as of version 2.0.0 be configured via rbac.create and .serviceAccount.create." }}
+{{- end }}
+
+
+{{- if hasKey .Values.apps.jupyterhub.hub "fsGid" }}
+{{- $breaking = print $breaking "\n\nCHANGED: hub.fsGid must as of version 2.0.0 be configured via hub.podSecurityContext.fsGroup." }}
+{{- end }}
+
+
+{{- if and .Values.apps.jupyterhub.singleuser.cloudMetadata.blockWithIptables (and .Values.apps.jupyterhub.singleuser.networkPolicy.enabled .Values.apps.jupyterhub.singleuser.networkPolicy.egressAllowRules.cloudMetadataServer) }}
+{{- $breaking = print $breaking "\n\nCHANGED: singleuser.cloudMetadata.blockWithIptables must as of version 3.0.0 not be configured together with singleuser.networkPolicy.egressAllowRules.cloudMetadataServer as it leads to an ambiguous configuration." }}
+{{- end }}
+
+
+{{- if $breaking }}
+{{- fail (print $breaking_title $breaking "\n\n") }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl b/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
index b742a1266..e9d2b4f42 100644
--- a/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
+++ b/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
@@ -168,30 +168,30 @@ ldap.dn.user.useLookupName: LDAPAuthenticator.use_lookup_dn_username
{{- $c := dict }}
{{- $result := (dict "hub" (dict "config" $c)) }}
{{- /*
- Flattens the config in .Values.apps.jupyterhub.auth to a format of
+ Flattens the config in .Values.apps.jupyterhub.apps.jupyterhub.auth to a format of
"keyX.keyY...": "value". Writes output to $c.
*/}}
- {{- include "jupyterhub.flattenDict" (list $c (omit .Values.apps.jupyterhub.auth "type" "custom")) }}
+ {{- include "jupyterhub.flattenDict" (list $c (omit .Values.apps.jupyterhub.apps.jupyterhub.auth "type" "custom")) }}
{{- /*
Transform the flattened config using a dictionary
representing the old z2jh config, output the result
in $c.
*/}}
- {{- include "jupyterhub.authDep.remapOldToNew.mappable" (list $c .Values.apps.jupyterhub.global.safeToShowValues) }}
+ {{- include "jupyterhub.authDep.remapOldToNew.mappable" (list $c .Values.apps.jupyterhub.apps.jupyterhub.global.safeToSho.Values.apps.jupyterhub) }}
- {{- $class_old_config_key := .Values.apps.jupyterhub.auth.type | default "" }} {{- /* ldap - github */}}
+ {{- $class_old_config_key := .Values.apps.jupyterhub.apps.jupyterhub.auth.type | default "" }} {{- /* ldap - github */}}
{{- $class_new_entrypoint := "" }} {{- /* ldapauthenticator.LDAPAuthenticator - github */}}
{{- $class_new_config_key := "" }} {{- /* LDAPAuthenticator - GitHubOAuthenticator */}}
{{- /* SET $class_new_entrypoint, $class_new_config_key */}}
{{- if eq $class_old_config_key "custom" }}
- {{- $class_new_entrypoint = .Values.apps.jupyterhub.auth.custom.className | default "custom.className wasn't configured!" }}
+ {{- $class_new_entrypoint = .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.className | default "custom.className wasn't configured!" }}
{{- $class_new_config_key = $class_new_entrypoint | splitList "." | last }}
{{- /* UPDATE c dict explicitly with auth.custom.config */}}
- {{- if .Values.apps.jupyterhub.auth.custom.config }}
- {{- $custom_config := merge (dict) .Values.apps.jupyterhub.auth.custom.config }}
- {{- if not .Values.apps.jupyterhub.global.safeToShowValues }}
+ {{- if .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.config }}
+ {{- $custom_config := merge (dict) .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.config }}
+ {{- if not .Values.apps.jupyterhub.apps.jupyterhub.global.safeToSho.Values.apps.jupyterhub }}
{{- range $key, $val := $custom_config }}
{{- $_ := set $custom_config $key "***" }}
{{- end }}
@@ -213,7 +213,7 @@ The JupyterHub Helm chart's auth config has been reworked and requires changes.
The new way to configure authentication in chart version 0.11.0+ is printed
below for your convenience. The values are not shown by default to ensure no
-secrets are exposed, run helm upgrade with --set global.safeToShowValues=true
+secrets are exposed, run helm upgrade with --set global.safeToSho.Values.apps.jupyterhub.true
to show them.
{{ $result | toYaml }}
diff --git a/applications/jupyterhub/deploy/templates/_helpers-names.tpl b/applications/jupyterhub/deploy/templates/_helpers-names.tpl
index e9cf7bb64..401d601a6 100644
--- a/applications/jupyterhub/deploy/templates/_helpers-names.tpl
+++ b/applications/jupyterhub/deploy/templates/_helpers-names.tpl
@@ -3,8 +3,8 @@
parent charts to reference these dynamic resource names.
To avoid duplicating documentation, for more information, please see the the
- fullnameOverride entry in schema.yaml or the configuration reference that
- schema.yaml renders to.
+ fullnameOverride entry in values.schema.yaml or the configuration reference
+ that values.schema.yaml renders to.
https://z2jh.jupyter.org/en/latest/resources/reference.html#fullnameOverride
*/}}
@@ -38,8 +38,8 @@
{{- $name_override := .Values.apps.jupyterhub.nameOverride }}
{{- if ne .Chart.Name "jupyterhub" }}
{{- if .Values.apps.jupyterhub.jupyterhub }}
- {{- $fullname_override = .Values.apps.jupyterhub.fullnameOverride }}
- {{- $name_override = .Values.apps.jupyterhub.nameOverride }}
+ {{- $fullname_override = .Values.apps.jupyterhub.jupyterhub.fullnameOverride }}
+ {{- $name_override = .Values.apps.jupyterhub.jupyterhub.nameOverride }}
{{- end }}
{{- end }}
@@ -76,12 +76,23 @@
{{- include "jupyterhub.fullname.dash" . }}hub
{{- end }}
+{{- /* hub-serviceaccount ServiceAccount */}}
+{{- define "jupyterhub.hub-serviceaccount.fullname" -}}
+ {{- if .Values.apps.jupyterhub.hub.serviceAccount.create }}
+ {{- .Values.apps.jupyterhub.hub.serviceAccount.name | default (include "jupyterhub.hub.fullname" .) }}
+ {{- else }}
+ {{- .Values.apps.jupyterhub.hub.serviceAccount.name | default "default" }}
+ {{- end }}
+{{- end }}
+
{{- /* hub-existing-secret Secret */}}
{{- define "jupyterhub.hub-existing-secret.fullname" -}}
{{- /* A hack to avoid issues from invoking this from a parent Helm chart. */}}
{{- $existing_secret := .Values.apps.jupyterhub.hub.existingSecret }}
{{- if ne .Chart.Name "jupyterhub" }}
- {{- $existing_secret = .Values.apps.jupyterhub.hub.existingSecret }}
+ {{- if .Values.apps.jupyterhub.jupyterhub }}
+ {{- $existing_secret = .Values.apps.jupyterhub.jupyterhub.hub.existingSecret }}
+ {{- end }}
{{- end }}
{{- if $existing_secret }}
{{- $existing_secret }}
@@ -133,11 +144,29 @@
{{- include "jupyterhub.fullname.dash" . }}autohttps
{{- end }}
+{{- /* autohttps-serviceaccount ServiceAccount */}}
+{{- define "jupyterhub.autohttps-serviceaccount.fullname" -}}
+ {{- if .Values.apps.jupyterhub.proxy.traefik.serviceAccount.create }}
+ {{- .Values.apps.jupyterhub.proxy.traefik.serviceAccount.name | default (include "jupyterhub.autohttps.fullname" .) }}
+ {{- else }}
+ {{- .Values.apps.jupyterhub.proxy.traefik.serviceAccount.name | default "default" }}
+ {{- end }}
+{{- end }}
+
{{- /* user-scheduler Deployment */}}
{{- define "jupyterhub.user-scheduler-deploy.fullname" -}}
{{- include "jupyterhub.fullname.dash" . }}user-scheduler
{{- end }}
+{{- /* user-scheduler-serviceaccount ServiceAccount */}}
+{{- define "jupyterhub.user-scheduler-serviceaccount.fullname" -}}
+ {{- if .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.create }}
+ {{- .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.name | default (include "jupyterhub.user-scheduler-deploy.fullname" .) }}
+ {{- else }}
+ {{- .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.name | default "default" }}
+ {{- end }}
+{{- end }}
+
{{- /* user-scheduler leader election lock resource */}}
{{- define "jupyterhub.user-scheduler-lock.fullname" -}}
{{- include "jupyterhub.user-scheduler-deploy.fullname" . }}-lock
@@ -153,6 +182,15 @@
{{- include "jupyterhub.fullname.dash" . }}hook-image-awaiter
{{- end }}
+{{- /* image-awaiter-serviceaccount ServiceAccount */}}
+{{- define "jupyterhub.hook-image-awaiter-serviceaccount.fullname" -}}
+ {{- if .Values.apps.jupyterhub.prePuller.hook.serviceAccount.create }}
+ {{- .Values.apps.jupyterhub.prePuller.hook.serviceAccount.name | default (include "jupyterhub.hook-image-awaiter.fullname" .) }}
+ {{- else }}
+ {{- .Values.apps.jupyterhub.prePuller.hook.serviceAccount.name | default "default" }}
+ {{- end }}
+{{- end }}
+
{{- /* hook-image-puller DaemonSet */}}
{{- define "jupyterhub.hook-image-puller.fullname" -}}
{{- include "jupyterhub.fullname.dash" . }}hook-image-puller
@@ -210,6 +248,15 @@
{{- end }}
{{- end }}
+{{- /* image-puller Priority */}}
+{{- define "jupyterhub.image-puller-priority.fullname" -}}
+ {{- if (include "jupyterhub.fullname" .) }}
+ {{- include "jupyterhub.fullname.dash" . }}image-puller
+ {{- else }}
+ {{- .Release.Name }}-image-puller-priority
+ {{- end }}
+{{- end }}
+
{{- /* user-scheduler's registered name */}}
{{- define "jupyterhub.user-scheduler.fullname" -}}
{{- if (include "jupyterhub.fullname" .) }}
@@ -231,6 +278,7 @@
fullname: {{ include "jupyterhub.fullname" . | quote }}
fullname-dash: {{ include "jupyterhub.fullname.dash" . | quote }}
hub: {{ include "jupyterhub.hub.fullname" . | quote }}
+hub-serviceaccount: {{ include "jupyterhub.hub-serviceaccount.fullname" . | quote }}
hub-existing-secret: {{ include "jupyterhub.hub-existing-secret.fullname" . | quote }}
hub-existing-secret-or-default: {{ include "jupyterhub.hub-existing-secret-or-default.fullname" . | quote }}
hub-pvc: {{ include "jupyterhub.hub-pvc.fullname" . | quote }}
@@ -241,10 +289,14 @@ proxy-public: {{ include "jupyterhub.proxy-public.fullname" . | quote }}
proxy-public-tls: {{ include "jupyterhub.proxy-public-tls.fullname" . | quote }}
proxy-public-manual-tls: {{ include "jupyterhub.proxy-public-manual-tls.fullname" . | quote }}
autohttps: {{ include "jupyterhub.autohttps.fullname" . | quote }}
+autohttps-serviceaccount: {{ include "jupyterhub.autohttps-serviceaccount.fullname" . | quote }}
user-scheduler-deploy: {{ include "jupyterhub.user-scheduler-deploy.fullname" . | quote }}
+user-scheduler-serviceaccount: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . | quote }}
user-scheduler-lock: {{ include "jupyterhub.user-scheduler-lock.fullname" . | quote }}
user-placeholder: {{ include "jupyterhub.user-placeholder.fullname" . | quote }}
+image-puller-priority: {{ include "jupyterhub.image-puller-priority.fullname" . | quote }}
hook-image-awaiter: {{ include "jupyterhub.hook-image-awaiter.fullname" . | quote }}
+hook-image-awaiter-serviceaccount: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . | quote }}
hook-image-puller: {{ include "jupyterhub.hook-image-puller.fullname" . | quote }}
continuous-image-puller: {{ include "jupyterhub.continuous-image-puller.fullname" . | quote }}
singleuser: {{ include "jupyterhub.singleuser.fullname" . | quote }}
diff --git a/applications/jupyterhub/deploy/templates/_helpers-netpol.tpl b/applications/jupyterhub/deploy/templates/_helpers-netpol.tpl
new file mode 100644
index 000000000..4075569ef
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/_helpers-netpol.tpl
@@ -0,0 +1,101 @@
+{{- /*
+ This named template renders egress rules for NetworkPolicy resources based on
+ common configuration.
+
+ It is rendering based on the `egressAllowRules` and `egress` keys of the
+ passed networkPolicy config object. Each flag set to true under
+ `egressAllowRules` is rendered to a egress rule that next to any custom user
+ defined rules from the `egress` config.
+
+ This named template needs to render based on a specific networkPolicy
+ resource, but also needs access to the root context. Due to that, it
+ accepts a list as its scope, where the first element is supposed to be the
+ root context and the second element is supposed to be the networkPolicy
+ configuration object.
+
+ As an example, this is how you would render this named template from a
+ NetworkPolicy resource under its egress:
+
+ egress:
+ # other rules here...
+
+ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.hub.networkPolicy)) }}
+ {{- . | nindent 4 }}
+ {{- end }}
+
+ Note that the reference to privateIPs and nonPrivateIPs relate to
+ https://en.wikipedia.org/wiki/Private_network#Private_IPv4_addresses.
+*/}}
+
+{{- define "jupyterhub.networkPolicy.renderEgressRules" -}}
+{{- $root := index . 0 }}
+{{- $netpol := index . 1 }}
+{{- if or (or $netpol.egressAllowRules.dnsPortsCloudMetadataServer $netpol.egressAllowRules.dnsPortsKubeSystemNamespace) $netpol.egressAllowRules.dnsPortsPrivateIPs }}
+- ports:
+ - port: 53
+ protocol: UDP
+ - port: 53
+ protocol: TCP
+ to:
+ {{- if $netpol.egressAllowRules.dnsPortsCloudMetadataServer }}
+ # Allow outbound connections to DNS ports on the cloud metadata server
+ - ipBlock:
+ cidr: {{ $root.Values.apps.jupyterhub.singleuser.cloudMetadata.ip }}/32
+ {{- end }}
+ {{- if $netpol.egressAllowRules.dnsPortsKubeSystemNamespace }}
+ # Allow outbound connections to DNS ports on pods in the kube-system
+ # namespace
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: kube-system
+ {{- end }}
+ {{- if $netpol.egressAllowRules.dnsPortsPrivateIPs }}
+ # Allow outbound connections to DNS ports on destinations in the private IP
+ # ranges
+ - ipBlock:
+ cidr: 10.0.0.0/8
+ - ipBlock:
+ cidr: 172.16.0.0/12
+ - ipBlock:
+ cidr: 192.168.0.0/16
+ {{- end }}
+{{- end }}
+
+{{- if $netpol.egressAllowRules.nonPrivateIPs }}
+# Allow outbound connections to non-private IP ranges
+- to:
+ - ipBlock:
+ cidr: 0.0.0.0/0
+ except:
+ # As part of this rule:
+ # - don't allow outbound connections to private IPs
+ - 10.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
+ # - don't allow outbound connections to the cloud metadata server
+ - {{ $root.Values.apps.jupyterhub.singleuser.cloudMetadata.ip }}/32
+{{- end }}
+
+{{- if $netpol.egressAllowRules.privateIPs }}
+# Allow outbound connections to private IP ranges
+- to:
+ - ipBlock:
+ cidr: 10.0.0.0/8
+ - ipBlock:
+ cidr: 172.16.0.0/12
+ - ipBlock:
+ cidr: 192.168.0.0/16
+{{- end }}
+
+{{- if $netpol.egressAllowRules.cloudMetadataServer }}
+# Allow outbound connections to the cloud metadata server
+- to:
+ - ipBlock:
+ cidr: {{ $root.Values.apps.jupyterhub.singleuser.cloudMetadata.ip }}/32
+{{- end }}
+
+{{- with $netpol.egress }}
+# Allow outbound connections based on user specified rules
+{{ . | toYaml }}
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/_helpers.tpl b/applications/jupyterhub/deploy/templates/_helpers.tpl
index efea86d1d..1737f3d6e 100755
--- a/applications/jupyterhub/deploy/templates/_helpers.tpl
+++ b/applications/jupyterhub/deploy/templates/_helpers.tpl
@@ -12,7 +12,7 @@
When you ask a helper to render its content, one often forward the current
scope to the helper in order to allow it to access .Release.Name,
- .Values.apps.jupyterhub.rbac.enabled and similar values.
+ .Values.apps.jupyterhub.rbac.create and similar values.
#### Example - Passing the current scope
{{ include "jupyterhub.commonLabels" . }}
@@ -180,8 +180,51 @@ component: {{ include "jupyterhub.componentLabel" . }}
Augments passed .pullSecrets with $.Values.apps.jupyterhub.imagePullSecrets
*/}}
{{- define "jupyterhub.imagePullSecrets" -}}
+ {{- /*
+ We have implemented a trick to allow a parent chart depending on this
+ chart to call this named templates.
+
+ Caveats and notes:
+
+ 1. While parent charts can reference these, grandparent charts can't.
+ 2. Parent charts must not use an alias for this chart.
+ 3. There is no failsafe workaround to above due to
+ https://github.com/helm/helm/issues/9214.
+ 4. .Chart is of its own type (*chart.Metadata) and needs to be casted
+ using "toYaml | fromYaml" in order to be able to use normal helm
+ template functions on it.
+ */}}
+ {{- $jupyterhub_values := .root.Values.apps.jupyterhub }}
+ {{- if ne .root.Chart.Name "jupyterhub" }}
+ {{- if .root.Values.apps.jupyterhub.jupyterhub }}
+ {{- $jupyterhub_values = .root.Values.apps.jupyterhub.jupyterhub }}
+ {{- end }}
+ {{- end }}
+ {{- /* Populate $_.list with all relevant entries */}}
+ {{- $_ := dict "list" (concat .image.pullSecrets $jupyterhub_values.imagePullSecrets | uniq) }}
+ {{- if and $jupyterhub_values.imagePullSecret.create $jupyterhub_values.imagePullSecret.automaticReferenceInjection }}
+ {{- $__ := set $_ "list" (append $_.list (include "jupyterhub.image-pull-secret.fullname" .root) | uniq) }}
+ {{- end }}
+ {{- /* Decide if something should be written */}}
+ {{- if not (eq ($_.list | toJson) "[]") }}
+
+ {{- /* Process the $_.list where strings become dicts with a name key and the
+ strings become the name keys' values into $_.res */}}
+ {{- $_ := set $_ "res" list }}
+ {{- range $_.list }}
+ {{- if eq (typeOf .) "string" }}
+ {{- $__ := set $_ "res" (append $_.res (dict "name" .)) }}
+ {{- else }}
+ {{- $__ := set $_ "res" (append $_.res .) }}
+ {{- end }}
+ {{- end }}
+
+ {{- /* Write the results */}}
+ {{- $_.res | toJson }}
+
+ {{- end }}
{{- end }}
{{- /*
@@ -339,3 +382,21 @@ limits:
{{- print "\n\nextraFiles entries (" $file_key ") must only contain one of the fields: 'data', 'stringData', and 'binaryData'." | fail }}
{{- end }}
{{- end }}
+
+{{- /*
+ jupyterhub.chart-version-to-git-ref:
+ Renders a valid git reference from a chartpress generated version string.
+ In practice, either a git tag or a git commit hash will be returned.
+
+ - The version string will follow a chartpress pattern, see
+ https://github.com/jupyterhub/chartpress#examples-chart-versions-and-image-tags.
+
+ - The regexReplaceAll function is a sprig library function, see
+ https://masterminds.github.io/sprig/strings.html.
+
+ - The regular expression is in golang syntax, but \d had to become \\d for
+ example.
+*/}}
+{{- define "jupyterhub.chart-version-to-git-ref" -}}
+{{- regexReplaceAll ".*[.-]n\\d+[.]h(.*)" . "${1}" }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/hub/configmap.yaml b/applications/jupyterhub/deploy/templates/hub/configmap.yaml
index c913f678b..f52feb6a8 100755
--- a/applications/jupyterhub/deploy/templates/hub/configmap.yaml
+++ b/applications/jupyterhub/deploy/templates/hub/configmap.yaml
@@ -29,5 +29,6 @@ data:
*/}}
checksum_hook-image-puller: {{ include "jupyterhub.imagePuller.daemonset.hook.checksum" . | quote }}
+ # EDIT: CLOUDHARNESS
allvalues.yaml: |
{{- .Values | toYaml | nindent 4 }}
\ No newline at end of file
diff --git a/applications/jupyterhub/deploy/templates/hub/deployment.yaml b/applications/jupyterhub/deploy/templates/hub/deployment.yaml
index 82132c628..d105eccaf 100755
--- a/applications/jupyterhub/deploy/templates/hub/deployment.yaml
+++ b/applications/jupyterhub/deploy/templates/hub/deployment.yaml
@@ -5,6 +5,9 @@ metadata:
labels:
{{- include "jupyterhub.labels" . | nindent 4 }}
spec:
+ {{- if typeIs "int" .Values.apps.jupyterhub.hub.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.apps.jupyterhub.hub.revisionHistoryLimit }}
+ {{- end }}
replicas: 1
selector:
matchLabels:
@@ -30,11 +33,14 @@ spec:
{{- . | toYaml | nindent 8 }}
{{- end }}
spec:
-{{ include "deploy_utils.etcHosts" . | indent 6 }}
+{{ include "deploy_utils.etcHosts" . | indent 6 }} # EDIT: CLOUDHARNESS
{{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
{{- end }}
- nodeSelector: {{ toJson .Values.apps.jupyterhub.hub.nodeSelector }}
+ {{- with .Values.apps.jupyterhub.hub.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
{{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.hub.tolerations }}
tolerations:
{{- . | toYaml | nindent 8 }}
@@ -44,7 +50,7 @@ spec:
- name: config
configMap:
name: {{ include "jupyterhub.hub.fullname" . }}
- {{- /* This is needed by cloudharness libraries */}}
+ {{- /* EDIT: CLOUDHARNESS This is needed by cloudharness libraries */}}
- name: cloudharness-allvalues
configMap:
name: cloudharness-allvalues
@@ -82,11 +88,13 @@ spec:
persistentVolumeClaim:
claimName: {{ include "jupyterhub.hub-pvc.fullname" . }}
{{- end }}
- {{- if .Values.apps.jupyterhub.rbac.enabled }}
- serviceAccountName: {{ include "jupyterhub.hub.fullname" . }}
+ {{- with include "jupyterhub.hub-serviceaccount.fullname" . }}
+ serviceAccountName: {{ . }}
{{- end }}
+ {{- with .Values.apps.jupyterhub.hub.podSecurityContext }}
securityContext:
- fsGroup: {{ .Values.apps.jupyterhub.hub.fsGid }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
{{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.apps.jupyterhub.hub.image) }}
imagePullSecrets: {{ . }}
{{- end }}
@@ -153,14 +161,14 @@ spec:
name: config
- mountPath: /usr/local/etc/jupyterhub/secret/
name: secret
- - name: cloudharness-allvalues
+ - name: cloudharness-allvalues # EDIT: CLOUDHARNESS START
mountPath: /opt/cloudharness/resources/allvalues.yaml
subPath: allvalues.yaml
{{- if .Values.apps.accounts }}
- name: cloudharness-kc-accounts
mountPath: /opt/cloudharness/resources/auth
readOnly: true
- {{- end }}
+ {{- end }} # EDIT: CLOUDHARNESS END
{{- if (include "jupyterhub.hub-existing-secret.fullname" .) }}
- mountPath: /usr/local/etc/jupyterhub/existing-secret/
name: existing-secret
diff --git a/applications/jupyterhub/deploy/templates/hub/netpol.yaml b/applications/jupyterhub/deploy/templates/hub/netpol.yaml
index 9a7a6bc12..d9508e20c 100755
--- a/applications/jupyterhub/deploy/templates/hub/netpol.yaml
+++ b/applications/jupyterhub/deploy/templates/hub/netpol.yaml
@@ -61,31 +61,24 @@ spec:
egress:
# hub --> proxy
- - ports:
- - port: 8001
- to:
+ - to:
- podSelector:
matchLabels:
{{- $_ := merge (dict "componentLabel" "proxy") . }}
{{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8001
+
# hub --> singleuser-server
- - ports:
- - port: 8888
- to:
+ - to:
- podSelector:
matchLabels:
{{- $_ := merge (dict "componentLabel" "singleuser-server") . }}
{{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8888
- # hub --> Kubernetes internal DNS
- - ports:
- - protocol: UDP
- port: 53
- - protocol: TCP
- port: 53
-
- {{- with .Values.apps.jupyterhub.hub.networkPolicy.egress }}
- # hub --> depends, but the default is everything
- {{- . | toYaml | nindent 4 }}
+ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.hub.networkPolicy)) }}
+ {{- . | nindent 4 }}
{{- end }}
{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/hub/pdb.yaml b/applications/jupyterhub/deploy/templates/hub/pdb.yaml
index 855609d41..bb6c7b16d 100755
--- a/applications/jupyterhub/deploy/templates/hub/pdb.yaml
+++ b/applications/jupyterhub/deploy/templates/hub/pdb.yaml
@@ -1,9 +1,5 @@
{{- if .Values.apps.jupyterhub.hub.pdb.enabled -}}
-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
-{{- else }}
-apiVersion: policy/v1beta1
-{{- end }}
kind: PodDisruptionBudget
metadata:
name: {{ include "jupyterhub.hub.fullname" . }}
diff --git a/applications/jupyterhub/deploy/templates/hub/rbac.yaml b/applications/jupyterhub/deploy/templates/hub/rbac.yaml
index 738daab15..1b689af49 100755
--- a/applications/jupyterhub/deploy/templates/hub/rbac.yaml
+++ b/applications/jupyterhub/deploy/templates/hub/rbac.yaml
@@ -1,15 +1,4 @@
-{{- if .Values.apps.jupyterhub.rbac.enabled -}}
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ include "jupyterhub.hub.fullname" . }}
- {{- with .Values.apps.jupyterhub.hub.serviceAccount.annotations }}
- annotations:
- {{- . | toYaml | nindent 4 }}
- {{- end }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
----
+{{- if .Values.apps.jupyterhub.rbac.create -}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@@ -32,7 +21,7 @@ metadata:
{{- include "jupyterhub.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
- name: {{ include "jupyterhub.hub.fullname" . }}
+ name: {{ include "jupyterhub.hub-serviceaccount.fullname" . }}
namespace: "{{ .Release.Namespace }}"
roleRef:
kind: Role
diff --git a/applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml b/applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml
new file mode 100644
index 000000000..817ed661f
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.apps.jupyterhub.hub.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "jupyterhub.hub-serviceaccount.fullname" . }}
+ {{- with .Values.apps.jupyterhub.hub.serviceAccount.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/image-pull-secret.yaml b/applications/jupyterhub/deploy/templates/image-pull-secret.yaml
new file mode 100644
index 000000000..b7544db72
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/image-pull-secret.yaml
@@ -0,0 +1,15 @@
+{{- if .Values.apps.jupyterhub.imagePullSecret.create }}
+kind: Secret
+apiVersion: v1
+metadata:
+ name: {{ include "jupyterhub.image-pull-secret.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation
+ "helm.sh/hook-weight": "-20"
+type: kubernetes.io/dockerconfigjson
+data:
+ .dockerconfigjson: {{ include "jupyterhub.dockerconfigjson" . }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl b/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
index e16fd1a9e..04fb18a32 100644
--- a/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
+++ b/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
@@ -34,6 +34,9 @@ spec:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 100%
+ {{- if typeIs "int" .Values.apps.jupyterhub.prePuller.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.apps.jupyterhub.prePuller.revisionHistoryLimit }}
+ {{- end }}
template:
metadata:
labels:
@@ -44,13 +47,17 @@ spec:
{{- end }}
spec:
{{- /*
- continuous-image-puller pods are made evictable to save on the k8s pods
- per node limit all k8s clusters have.
+ image-puller pods are made evictable to save on the k8s pods
+ per node limit all k8s clusters have and have a higher priority
+ than user-placeholder pods that could block an entire node.
*/}}
- {{- if and (not .hook) .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
- priorityClassName: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
+ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.image-puller-priority.fullname" . }}
+ {{- end }}
+ {{- with .Values.apps.jupyterhub.singleuser.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
{{- end }}
- nodeSelector: {{ toJson .Values.apps.jupyterhub.singleuser.nodeSelector }}
{{- with concat .Values.apps.jupyterhub.scheduling.userPods.tolerations .Values.apps.jupyterhub.singleuser.extraTolerations .Values.apps.jupyterhub.prePuller.extraTolerations }}
tolerations:
{{- . | toYaml | nindent 8 }}
@@ -127,6 +134,7 @@ spec:
{{- /* --- Conditionally pull profileList images --- */}}
{{- if .Values.apps.jupyterhub.prePuller.pullProfileListImages }}
{{- range $k, $container := .Values.apps.jupyterhub.singleuser.profileList }}
+ {{- /* profile's kubespawner_override */}}
{{- if $container.kubespawner_override }}
{{- if $container.kubespawner_override.image }}
- name: image-pull-singleuser-profilelist-{{ $k }}
@@ -145,6 +153,33 @@ spec:
{{- end }}
{{- end }}
{{- end }}
+ {{- /* kubespawner_override in profile's profile_options */}}
+ {{- if $container.profile_options }}
+ {{- range $option, $option_spec := $container.profile_options }}
+ {{- if $option_spec.choices }}
+ {{- range $choice, $choice_spec := $option_spec.choices }}
+ {{- if $choice_spec.kubespawner_override }}
+ {{- if $choice_spec.kubespawner_override.image }}
+ - name: image-pull-profile-{{ $k }}-option-{{ $option }}-{{ $choice }}
+ image: {{ $choice_spec.kubespawner_override.image }}
+ command:
+ - /bin/sh
+ - -c
+ - echo "Pulling complete"
+ {{- with $.Values.apps.jupyterhub.prePuller.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with $.Values.apps.jupyterhub.prePuller.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
{{- end }}
{{- end }}
@@ -164,12 +199,12 @@ spec:
securityContext:
{{- . | toYaml | nindent 12 }}
{{- end }}
- {{- end }}
-
- {{- /* --- Pull CloudHarness tasks images --- */}}
- {{- range $k, $v := ( index .Values "task-images" ) }}
- - name: image-pull-{{ $k | replace "-" "" }}
- image: {{ $v }}
+ {{- end }}
+ {{- /* --- EDIT: CLOUDHARNESS pull images --- */}}
+ {{- if $.Values.apps.jupyterhub.harness.dependencies.prepull -}}
+ {{- range $k, $v := $.Values.apps.jupyterhub.harness.dependencies.prepull }}
+ - name: image-pull-{{ $v }}
+ image: {{ get ( get $.Values "task-images" ) $v }}
command:
- /bin/sh
- -c
@@ -183,6 +218,8 @@ spec:
{{- . | toYaml | nindent 12 }}
{{- end }}
{{- end }}
+ {{- end }}
+ {{- /* --- END EDIT: CLOUDHARNESS pull images --- */}}
containers:
- name: pause
image: {{ .Values.apps.jupyterhub.prePuller.pause.image.name }}:{{ .Values.apps.jupyterhub.prePuller.pause.image.tag }}
diff --git a/applications/jupyterhub/deploy/templates/image-puller/job.yaml b/applications/jupyterhub/deploy/templates/image-puller/job.yaml
index bdd9f63c0..cc6db3ecf 100755
--- a/applications/jupyterhub/deploy/templates/image-puller/job.yaml
+++ b/applications/jupyterhub/deploy/templates/image-puller/job.yaml
@@ -28,16 +28,22 @@ spec:
labels:
{{- /* Changes here will cause the Job to restart the pods. */}}
{{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ {{- with .Values.apps.jupyterhub.prePuller.labels }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
{{- with .Values.apps.jupyterhub.prePuller.annotations }}
annotations:
{{- . | toYaml | nindent 8 }}
{{- end }}
spec:
restartPolicy: Never
- {{- if .Values.apps.jupyterhub.rbac.enabled }}
- serviceAccountName: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
+ {{- with include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
+ serviceAccountName: {{ . }}
+ {{- end }}
+ {{- with .Values.apps.jupyterhub.prePuller.hook.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
{{- end }}
- nodeSelector: {{ toJson .Values.apps.jupyterhub.prePuller.hook.nodeSelector }}
{{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.prePuller.hook.tolerations }}
tolerations:
{{- . | toYaml | nindent 8 }}
@@ -58,6 +64,7 @@ spec:
- -api-server-address=https://kubernetes.default.svc:$(KUBERNETES_SERVICE_PORT)
- -namespace={{ .Release.Namespace }}
- -daemonset={{ include "jupyterhub.hook-image-puller.fullname" . }}
+ - -pod-scheduling-wait-duration={{ .Values.apps.jupyterhub.prePuller.hook.podSchedulingWaitDuration }}
{{- with .Values.apps.jupyterhub.prePuller.hook.containerSecurityContext }}
securityContext:
{{- . | toYaml | nindent 12 }}
diff --git a/applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml b/applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml
new file mode 100644
index 000000000..1a3fca335
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+{{- if or .Values.apps.jupyterhub.prePuller.hook.enabled .Values.apps.jupyterhub.prePuller.continuous.enabled -}}
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+ name: {{ include "jupyterhub.image-puller-priority.fullname" . }}
+ annotations:
+ meta.helm.sh/release-name: "{{ .Release.Name }}"
+ meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+value: {{ .Values.apps.jupyterhub.scheduling.podPriority.imagePullerPriority }}
+globalDefault: false
+description: >-
+ Enables [hook|continuous]-image-puller pods to fit on nodes even though they
+ are clogged by user-placeholder pods, while not evicting normal user pods.
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml b/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
index 95c86ddf0..5946896b6 100755
--- a/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
+++ b/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
@@ -1,29 +1,8 @@
{{- /*
Permissions to be used by the hook-image-awaiter job
*/}}
-{{- if .Values.apps.jupyterhub.rbac.enabled }}
-{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) }}
-{{- /*
-This service account...
-*/ -}}
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
- hub.jupyter.org/deletable: "true"
- annotations:
- "helm.sh/hook": pre-install,pre-upgrade
- "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
- "helm.sh/hook-weight": "0"
- {{- with .Values.apps.jupyterhub.prePuller.hook.serviceAccount.annotations }}
- {{- . | toYaml | nindent 4 }}
- {{- end }}
----
-{{- /*
-... will be used by this role...
-*/}}
+{{- if .Values.apps.jupyterhub.rbac.create -}}
+{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@@ -56,7 +35,7 @@ metadata:
"helm.sh/hook-weight": "0"
subjects:
- kind: ServiceAccount
- name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
+ name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
namespace: "{{ .Release.Namespace }}"
roleRef:
kind: Role
diff --git a/applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml b/applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml
new file mode 100644
index 000000000..2e5fa7286
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml
@@ -0,0 +1,21 @@
+{{- /*
+ServiceAccount for the pre-puller hook's image-awaiter-job
+*/}}
+{{- if .Values.apps.jupyterhub.prePuller.hook.serviceAccount.create -}}
+{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ hub.jupyter.org/deletable: "true"
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ "helm.sh/hook-weight": "0"
+ {{- with .Values.apps.jupyterhub.prePuller.hook.serviceAccount.annotations }}
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt b/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt
deleted file mode 100755
index 08bd7bbab..000000000
--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# Automatic HTTPS Terminator
-
-This directory has Kubernetes objects for automatic Let's Encrypt Support.
-When enabled, we create a new deployment object that has an nginx-ingress
-and kube-lego container in it. This is responsible for requesting,
-storing and renewing certificates as needed from Let's Encrypt.
-
-The only change required outside of this directory is in the `proxy-public`
-service, which targets different hubs based on automatic HTTPS status.
\ No newline at end of file
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml
deleted file mode 100755
index 8d71a9716..000000000
--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
-{{- if $autoHTTPS -}}
-{{- $_ := .Values.apps.jupyterhub.proxy.https.letsencrypt.contactEmail | required "proxy.https.letsencrypt.contactEmail is a required field" -}}
-
-# This configmap contains Traefik configuration files to be mounted.
-# - traefik.yaml will only be read during startup (static configuration)
-# - dynamic.yaml will be read on change (dynamic configuration)
-#
-# ref: https://docs.traefik.io/getting-started/configuration-overview/
-#
-# The configuration files are first rendered with Helm templating to large YAML
-# strings. Then we use the fromYAML function on these strings to get an object,
-# that we in turn merge with user provided extra configuration.
-#
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
-data:
- traefik.yaml: |
- {{- include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.apps.jupyterhub.proxy.traefik.extraStaticConfig | toYaml | nindent 4 }}
- dynamic.yaml: |
- {{- include "jupyterhub.dynamic.yaml" . | fromYaml | merge .Values.apps.jupyterhub.proxy.traefik.extraDynamicConfig | toYaml | nindent 4 }}
-
-{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml
deleted file mode 100755
index fcb062fd0..000000000
--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml
+++ /dev/null
@@ -1,141 +0,0 @@
-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
-{{- if $autoHTTPS -}}
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
-spec:
- replicas: 1
- selector:
- matchLabels:
- {{- include "jupyterhub.matchLabels" . | nindent 6 }}
- template:
- metadata:
- labels:
- {{- include "jupyterhub.matchLabels" . | nindent 8 }}
- hub.jupyter.org/network-access-proxy-http: "true"
- {{- with .Values.apps.jupyterhub.proxy.traefik.labels }}
- {{- . | toYaml | nindent 8 }}
- {{- end }}
- annotations:
- # Only force a restart through a change to this checksum when the static
- # configuration is changed, as the dynamic can be updated after start.
- # Any disruptions to this deployment impacts everything, it is the
- # entrypoint of all network traffic.
- checksum/static-config: {{ include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.apps.jupyterhub.proxy.traefik.extraStaticConfig | toYaml | sha256sum }}
- spec:
- {{- if .Values.apps.jupyterhub.rbac.enabled }}
- serviceAccountName: {{ include "jupyterhub.autohttps.fullname" . }}
- {{- end }}
- {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
- priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
- {{- end }}
- nodeSelector: {{ toJson .Values.apps.jupyterhub.proxy.traefik.nodeSelector }}
- {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.proxy.traefik.tolerations }}
- tolerations:
- {{- . | toYaml | nindent 8 }}
- {{- end }}
- {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
- volumes:
- - name: certificates
- emptyDir: {}
- - name: traefik-config
- configMap:
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- {{- with .Values.apps.jupyterhub.proxy.traefik.extraVolumes }}
- {{- . | toYaml | nindent 8 }}
- {{- end }}
- {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.apps.jupyterhub.proxy.traefik.image) }}
- imagePullSecrets: {{ . }}
- {{- end }}
- initContainers:
- - name: load-acme
- image: "{{ .Values.apps.jupyterhub.proxy.secretSync.image.name }}:{{ .Values.apps.jupyterhub.proxy.secretSync.image.tag }}"
- {{- with .Values.apps.jupyterhub.proxy.secretSync.image.pullPolicy }}
- imagePullPolicy: {{ . }}
- {{- end }}
- args:
- - load
- - {{ include "jupyterhub.proxy-public-tls.fullname" . }}
- - acme.json
- - /etc/acme/acme.json
- env:
- # We need this to get logs immediately
- - name: PYTHONUNBUFFERED
- value: "True"
- {{- with .Values.apps.jupyterhub.proxy.traefik.extraEnv }}
- {{- include "jupyterhub.extraEnv" . | nindent 12 }}
- {{- end }}
- volumeMounts:
- - name: certificates
- mountPath: /etc/acme
- {{- with .Values.apps.jupyterhub.proxy.secretSync.containerSecurityContext }}
- securityContext:
- {{- . | toYaml | nindent 12 }}
- {{- end }}
- containers:
- - name: traefik
- image: "{{ .Values.apps.jupyterhub.proxy.traefik.image.name }}:{{ .Values.apps.jupyterhub.proxy.traefik.image.tag }}"
- {{- with .Values.apps.jupyterhub.proxy.traefik.image.pullPolicy }}
- imagePullPolicy: {{ . }}
- {{- end }}
- {{- with .Values.apps.jupyterhub.proxy.traefik.resources }}
- resources:
- {{- . | toYaml | nindent 12 }}
- {{- end }}
- ports:
- - name: http
- containerPort: 8080
- - name: https
- containerPort: 8443
- {{- with .Values.apps.jupyterhub.proxy.traefik.extraPorts }}
- {{- . | toYaml | nindent 12 }}
- {{- end }}
- volumeMounts:
- - name: traefik-config
- mountPath: /etc/traefik
- - name: certificates
- mountPath: /etc/acme
- {{- with .Values.apps.jupyterhub.proxy.traefik.extraVolumeMounts }}
- {{- . | toYaml | nindent 12 }}
- {{- end }}
- {{- with .Values.apps.jupyterhub.proxy.traefik.extraEnv }}
- env:
- {{- include "jupyterhub.extraEnv" . | nindent 12 }}
- {{- end }}
- {{- with .Values.apps.jupyterhub.proxy.traefik.containerSecurityContext }}
- securityContext:
- {{- . | toYaml | nindent 12 }}
- {{- end }}
- - name: secret-sync
- image: "{{ .Values.apps.jupyterhub.proxy.secretSync.image.name }}:{{ .Values.apps.jupyterhub.proxy.secretSync.image.tag }}"
- {{- with .Values.apps.jupyterhub.proxy.secretSync.image.pullPolicy }}
- imagePullPolicy: {{ . }}
- {{- end }}
- args:
- - watch-save
- - --label=app={{ include "jupyterhub.appLabel" . }}
- - --label=release={{ .Release.Name }}
- - --label=chart={{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
- - --label=heritage=secret-sync
- - {{ include "jupyterhub.proxy-public-tls.fullname" . }}
- - acme.json
- - /etc/acme/acme.json
- env:
- # We need this to get logs immediately
- - name: PYTHONUNBUFFERED
- value: "True"
- volumeMounts:
- - name: certificates
- mountPath: /etc/acme
- {{- with .Values.apps.jupyterhub.proxy.secretSync.containerSecurityContext }}
- securityContext:
- {{- . | toYaml | nindent 12 }}
- {{- end }}
- {{- with .Values.apps.jupyterhub.proxy.traefik.extraPodSpec }}
- {{- . | toYaml | nindent 6 }}
- {{- end }}
-{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml
deleted file mode 100755
index ea43b6726..000000000
--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
-{{- if (and $autoHTTPS .Values.apps.jupyterhub.rbac.enabled) -}}
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
- {{- with .Values.apps.jupyterhub.proxy.traefik.serviceAccount.annotations }}
- annotations:
- {{- . | toYaml | nindent 4 }}
- {{- end }}
-rules:
-- apiGroups: [""]
- resources: ["secrets"]
- verbs: ["get", "patch", "list", "create"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
-subjects:
-- kind: ServiceAccount
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- apiGroup:
-roleRef:
- kind: Role
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- apiGroup: rbac.authorization.k8s.io
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ include "jupyterhub.autohttps.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
-{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml
deleted file mode 100755
index d57c135dd..000000000
--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
-{{- if $autoHTTPS -}}
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ include "jupyterhub.proxy-http.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
- {{- with .Values.apps.jupyterhub.proxy.service.labels }}
- {{- . | toYaml | nindent 4 }}
- {{- end }}
- {{- with .Values.apps.jupyterhub.proxy.service.annotations }}
- annotations:
- {{- . | toYaml | nindent 4 }}
- {{- end }}
-spec:
- type: ClusterIP
- selector:
- {{- $_ := merge (dict "componentLabel" "proxy") . }}
- {{- include "jupyterhub.matchLabels" $_ | nindent 4 }}
- ports:
- - port: 8000
- targetPort: http
-{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/deployment.yaml b/applications/jupyterhub/deploy/templates/proxy/deployment.yaml
index 6d63ba880..bb37b8f03 100755
--- a/applications/jupyterhub/deploy/templates/proxy/deployment.yaml
+++ b/applications/jupyterhub/deploy/templates/proxy/deployment.yaml
@@ -7,6 +7,9 @@ metadata:
labels:
{{- include "jupyterhub.labels" . | nindent 4 }}
spec:
+ {{- if typeIs "int" .Values.apps.jupyterhub.proxy.chp.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.apps.jupyterhub.proxy.chp.revisionHistoryLimit }}
+ {{- end }}
replicas: 1
selector:
matchLabels:
@@ -35,7 +38,7 @@ spec:
# match the k8s Secret during the first upgrade following an auth_token
# was generated.
checksum/auth-token: {{ include "jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token" . | sha256sum | trunc 4 | quote }}
- checksum/proxy-secret: {{ include (print $.Template.BasePath "/jupyterhub/hub/secret.yaml") . | sha256sum }}
+ checksum/proxy-secret: {{ include (print $.Template.BasePath "/jupyterhub/proxy/secret.yaml") . | sha256sum | quote }}
{{- with .Values.apps.jupyterhub.proxy.annotations }}
{{- . | toYaml | nindent 8 }}
{{- end }}
@@ -44,7 +47,10 @@ spec:
{{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
{{- end }}
- nodeSelector: {{ toJson .Values.apps.jupyterhub.proxy.chp.nodeSelector }}
+ {{- with .Values.apps.jupyterhub.proxy.chp.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
{{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.proxy.chp.tolerations }}
tolerations:
{{- . | toYaml | nindent 8 }}
@@ -135,6 +141,8 @@ spec:
livenessProbe:
initialDelaySeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.failureThreshold }}
httpGet:
path: /_chp_healthz
{{- if or $manualHTTPS $manualHTTPSwithsecret }}
@@ -149,6 +157,8 @@ spec:
readinessProbe:
initialDelaySeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.failureThreshold }}
httpGet:
path: /_chp_healthz
{{- if or $manualHTTPS $manualHTTPSwithsecret }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/netpol.yaml b/applications/jupyterhub/deploy/templates/proxy/netpol.yaml
index adc827731..88a00be6a 100755
--- a/applications/jupyterhub/deploy/templates/proxy/netpol.yaml
+++ b/applications/jupyterhub/deploy/templates/proxy/netpol.yaml
@@ -85,32 +85,24 @@ spec:
egress:
# proxy --> hub
- - ports:
- - port: 8081
- to:
+ - to:
- podSelector:
matchLabels:
{{- $_ := merge (dict "componentLabel" "hub") . }}
{{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8081
# proxy --> singleuser-server
- - ports:
- - port: 8888
- to:
+ - to:
- podSelector:
matchLabels:
{{- $_ := merge (dict "componentLabel" "singleuser-server") . }}
{{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8888
- # proxy --> Kubernetes internal DNS
- - ports:
- - protocol: UDP
- port: 53
- - protocol: TCP
- port: 53
-
- {{- with .Values.apps.jupyterhub.proxy.chp.networkPolicy.egress }}
- # proxy --> depends, but the default is everything
- {{- . | toYaml | nindent 4 }}
+ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.proxy.chp.networkPolicy)) }}
+ {{- . | nindent 4 }}
{{- end }}
{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/pdb.yaml b/applications/jupyterhub/deploy/templates/proxy/pdb.yaml
index 1846a3b00..155895b06 100755
--- a/applications/jupyterhub/deploy/templates/proxy/pdb.yaml
+++ b/applications/jupyterhub/deploy/templates/proxy/pdb.yaml
@@ -1,9 +1,5 @@
{{- if .Values.apps.jupyterhub.proxy.chp.pdb.enabled -}}
-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
-{{- else }}
-apiVersion: policy/v1beta1
-{{- end }}
kind: PodDisruptionBudget
metadata:
name: {{ include "jupyterhub.proxy.fullname" . }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/service.yaml b/applications/jupyterhub/deploy/templates/proxy/service.yaml
index 0d9ca5b2c..f634ba9e5 100755
--- a/applications/jupyterhub/deploy/templates/proxy/service.yaml
+++ b/applications/jupyterhub/deploy/templates/proxy/service.yaml
@@ -35,12 +35,15 @@ metadata:
{{- end }}
spec:
selector:
+ # This service will target the autohttps pod if autohttps is configured, and
+ # the proxy pod if not. When autohttps is configured, the service proxy-http
+ # will be around to target the proxy pod directly.
{{- if $autoHTTPS }}
- component: autohttps
+ {{- $_ := merge (dict "componentLabel" "autohttps") . -}}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 4 }}
{{- else }}
- component: proxy
+ {{- include "jupyterhub.matchLabels" . | nindent 4 }}
{{- end }}
- release: {{ .Release.Name }}
ports:
{{- if $HTTPS }}
- name: https
diff --git a/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml b/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
index 588cf196c..1bed905e1 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
@@ -4,22 +4,9 @@ kind: PriorityClass
metadata:
name: {{ include "jupyterhub.priority.fullname" . }}
annotations:
- # FIXME: PriorityClasses must be added before the other resources reference
- # them, and in the past a workaround was needed to accomplish this:
- # to make the resource a Helm hook.
- #
- # To transition this resource to no longer be a Helm hook resource,
- # we explicitly add ownership annotations/labels (in 1.0.0) which
- # will allow a future upgrade (in 2.0.0) to remove all hook and
- # ownership annotations/labels.
- #
- helm.sh/hook: pre-install,pre-upgrade
- helm.sh/hook-delete-policy: before-hook-creation
- helm.sh/hook-weight: "-100"
meta.helm.sh/release-name: "{{ .Release.Name }}"
meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
labels:
- app.kubernetes.io/managed-by: Helm
{{- $_ := merge (dict "componentLabel" "default-priority") . }}
{{- include "jupyterhub.labels" $_ | nindent 4 }}
value: {{ .Values.apps.jupyterhub.scheduling.podPriority.defaultPriority }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
index b1dc6c5d0..800ac2086 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
@@ -3,11 +3,7 @@ The cluster autoscaler should be allowed to evict and reschedule these pods if
it would help in order to scale down a node.
*/}}
{{- if .Values.apps.jupyterhub.scheduling.userPlaceholder.enabled -}}
-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
-{{- else }}
-apiVersion: policy/v1beta1
-{{- end }}
kind: PodDisruptionBudget
metadata:
name: {{ include "jupyterhub.user-placeholder.fullname" . }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
index e03497dba..688e217cd 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
@@ -5,22 +5,9 @@ kind: PriorityClass
metadata:
name: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
annotations:
- # FIXME: PriorityClasses must be added before the other resources reference
- # them, and in the past a workaround was needed to accomplish this:
- # to make the resource a Helm hook.
- #
- # To transition this resource to no longer be a Helm hook resource,
- # we explicitly add ownership annotations/labels (in 1.0.0) which
- # will allow a future upgrade (in 2.0.0) to remove all hook and
- # ownership annotations/labels.
- #
- helm.sh/hook: pre-install,pre-upgrade
- helm.sh/hook-delete-policy: before-hook-creation
- helm.sh/hook-weight: "-100"
meta.helm.sh/release-name: "{{ .Release.Name }}"
meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
labels:
- app.kubernetes.io/managed-by: Helm
{{- include "jupyterhub.labels" . | nindent 4 }}
value: {{ .Values.apps.jupyterhub.scheduling.podPriority.userPlaceholderPriority }}
globalDefault: false
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
index 114f62629..c243beee3 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
@@ -16,6 +16,9 @@ metadata:
{{- include "jupyterhub.labels" . | nindent 4 }}
spec:
podManagementPolicy: Parallel
+ {{- if typeIs "int" .Values.apps.jupyterhub.scheduling.userPlaceholder.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.apps.jupyterhub.scheduling.userPlaceholder.revisionHistoryLimit }}
+ {{- end }}
replicas: {{ .Values.apps.jupyterhub.scheduling.userPlaceholder.replicas }}
selector:
matchLabels:
@@ -23,9 +26,16 @@ spec:
serviceName: {{ include "jupyterhub.user-placeholder.fullname" . }}
template:
metadata:
+ {{- with .Values.apps.jupyterhub.scheduling.userPlaceholder.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
labels:
{{- /* Changes here will cause the Deployment to restart the pods. */}}
{{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ {{- with .Values.apps.jupyterhub.scheduling.userPlaceholder.labels }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
spec:
{{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
priorityClassName: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
@@ -33,7 +43,10 @@ spec:
{{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled }}
schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
{{- end }}
- nodeSelector: {{ toJson .Values.apps.jupyterhub.singleuser.nodeSelector }}
+ {{- with .Values.apps.jupyterhub.singleuser.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
{{- with concat .Values.apps.jupyterhub.scheduling.userPods.tolerations .Values.apps.jupyterhub.singleuser.extraTolerations }}
tolerations:
{{- . | toYaml | nindent 8 }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
index ef8a37f67..3e83b444f 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
@@ -6,16 +6,28 @@ metadata:
labels:
{{- include "jupyterhub.labels" . | nindent 4 }}
data:
- # ref: https://kubernetes.io/docs/reference/scheduling/config/
+ {{- /*
+ This is configuration of a k8s official kube-scheduler binary running in the
+ user-scheduler.
+
+ ref: https://kubernetes.io/docs/reference/scheduling/config/
+ ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1/
+ */}}
config.yaml: |
- apiVersion: kubescheduler.config.k8s.io/v1beta1
+ apiVersion: kubescheduler.config.k8s.io/v1
kind: KubeSchedulerConfiguration
leaderElection:
- resourceLock: endpoints
+ resourceLock: leases
resourceName: {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
resourceNamespace: "{{ .Release.Namespace }}"
profiles:
- schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
+ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.plugins }}
plugins:
- {{- .Values.apps.jupyterhub.scheduling.userScheduler.plugins | toYaml | nindent 10 }}
+ {{- . | toYaml | nindent 10 }}
+ {{- end }}
+ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.pluginConfig }}
+ pluginConfig:
+ {{- . | toYaml | nindent 10 }}
+ {{- end }}
{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
index 1bcaf317c..f22d0de89 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
@@ -6,6 +6,9 @@ metadata:
labels:
{{- include "jupyterhub.labels" . | nindent 4 }}
spec:
+ {{- if typeIs "int" .Values.apps.jupyterhub.scheduling.userScheduler.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.apps.jupyterhub.scheduling.userScheduler.revisionHistoryLimit }}
+ {{- end }}
replicas: {{ .Values.apps.jupyterhub.scheduling.userScheduler.replicas }}
selector:
matchLabels:
@@ -14,16 +17,25 @@ spec:
metadata:
labels:
{{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.labels }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
annotations:
checksum/config-map: {{ include (print $.Template.BasePath "/jupyterhub/scheduling/user-scheduler/configmap.yaml") . | sha256sum }}
+ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.annotations }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
spec:
- {{- if .Values.apps.jupyterhub.rbac.enabled }}
- serviceAccountName: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
+ {{ with include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
+ serviceAccountName: {{ . }}
{{- end }}
{{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
{{- end }}
- nodeSelector: {{ toJson .Values.apps.jupyterhub.scheduling.userScheduler.nodeSelector }}
+ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
{{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.scheduling.userScheduler.tolerations }}
tolerations:
{{- . | toYaml | nindent 8 }}
@@ -44,13 +56,6 @@ spec:
{{- end }}
command:
- /usr/local/bin/kube-scheduler
- # NOTE: --leader-elect-... (new) and --lock-object-... (deprecated)
- # flags are silently ignored in favor of whats defined in the
- # passed KubeSchedulerConfiguration whenever --config is
- # passed.
- #
- # ref: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/
- #
# NOTE: --authentication-skip-lookup=true is used to avoid a
# seemingly harmless error, if we need to not skip
# "authentication lookup" in the future, see the linked issue.
@@ -65,12 +70,14 @@ spec:
livenessProbe:
httpGet:
path: /healthz
- port: 10251
+ scheme: HTTPS
+ port: 10259
initialDelaySeconds: 15
readinessProbe:
httpGet:
path: /healthz
- port: 10251
+ scheme: HTTPS
+ port: 10259
{{- with .Values.apps.jupyterhub.scheduling.userScheduler.resources }}
resources:
{{- . | toYaml | nindent 12 }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
index 04f2af8c3..2c9c6de81 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
@@ -1,9 +1,5 @@
{{- if and .Values.apps.jupyterhub.scheduling.userScheduler.enabled .Values.apps.jupyterhub.scheduling.userScheduler.pdb.enabled -}}
-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
-{{- else }}
-apiVersion: policy/v1beta1
-{{- end }}
kind: PodDisruptionBudget
metadata:
name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
index 083e06542..9c7fab736 100755
--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
@@ -1,16 +1,5 @@
{{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled -}}
-{{- if .Values.apps.jupyterhub.rbac.enabled }}
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
- labels:
- {{- include "jupyterhub.labels" . | nindent 4 }}
- {{- with .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.annotations }}
- annotations:
- {{- . | toYaml | nindent 4 }}
- {{- end }}
----
+{{- if .Values.apps.jupyterhub.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@@ -19,13 +8,23 @@ metadata:
{{- include "jupyterhub.labels" . | nindent 4 }}
rules:
# Copied from the system:kube-scheduler ClusterRole of the k8s version
- # matching the kube-scheduler binary we use. A modification of two resource
- # name references from kube-scheduler to user-scheduler-lock was made.
+ # matching the kube-scheduler binary we use. A modification has been made to
+ # resourceName fields to remain relevant for how we have named our resources
+ # in this Helm chart.
#
- # NOTE: These rules have been unchanged between 1.12 and 1.15, then changed in
- # 1.16 and in 1.17, but unchanged in 1.18 and 1.19.
+ # NOTE: These rules have been:
+ # - unchanged between 1.12 and 1.15
+ # - changed in 1.16
+ # - changed in 1.17
+ # - unchanged between 1.18 and 1.20
+ # - changed in 1.21: get/list/watch permission for namespace,
+ # csidrivers, csistoragecapacities was added.
+ # - unchanged between 1.22 and 1.27
+ # - changed in 1.28: permissions to get/update lock endpoint resource
+ # removed
+ # - unchanged between 1.28 and 1.29
#
- # ref: https://github.com/kubernetes/kubernetes/blob/v1.19.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L696-L829
+ # ref: https://github.com/kubernetes/kubernetes/blob/v1.29.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L721-L862
- apiGroups:
- ""
- events.k8s.io
@@ -50,21 +49,6 @@ rules:
verbs:
- get
- update
- - apiGroups:
- - ""
- resources:
- - endpoints
- verbs:
- - create
- - apiGroups:
- - ""
- resourceNames:
- - {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
- resources:
- - endpoints
- verbs:
- - get
- - update
- apiGroups:
- ""
resources:
@@ -159,13 +143,37 @@ rules:
- get
- list
- watch
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csidrivers
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csistoragecapacities
+ verbs:
+ - get
+ - list
+ - watch
# Copied from the system:volume-scheduler ClusterRole of the k8s version
# matching the kube-scheduler binary we use.
#
- # NOTE: These rules have not changed between 1.12 and 1.19.
+ # NOTE: These rules have not changed between 1.12 and 1.29.
#
- # ref: https://github.com/kubernetes/kubernetes/blob/v1.19.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1213-L1240
+ # ref: https://github.com/kubernetes/kubernetes/blob/v1.29.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1283-L1310
- apiGroups:
- ""
resources:
@@ -203,7 +211,7 @@ metadata:
{{- include "jupyterhub.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
- name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
+ name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
namespace: "{{ .Release.Namespace }}"
roleRef:
kind: ClusterRole
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml
new file mode 100644
index 000000000..67618b036
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled -}}
+{{- if .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml b/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
index 3dfb1378d..931a150fe 100755
--- a/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
+++ b/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
@@ -62,23 +62,38 @@ spec:
egress:
# singleuser-server --> hub
- - ports:
- - port: 8081
- to:
+ - to:
- podSelector:
matchLabels:
{{- $_ := merge (dict "componentLabel" "hub") . }}
{{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8081
- # singleuser-server --> Kubernetes internal DNS
- - ports:
- - protocol: UDP
- port: 53
- - protocol: TCP
- port: 53
+ # singleuser-server --> proxy
+ # singleuser-server --> autohttps
+ #
+ # While not critical for core functionality, a user or library code may rely
+ # on communicating with the proxy or autohttps pods via a k8s Service it can
+ # detect from well-known environment variables.
+ #
+ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "proxy") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8000
+ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "autohttps") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8080
+ - port: 8443
- {{- with .Values.apps.jupyterhub.singleuser.networkPolicy.egress }}
- # singleuser-server --> depends, but the default is everything
- {{- . | toYaml | nindent 4 }}
+ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.singleuser.networkPolicy)) }}
+ {{- . | nindent 4 }}
{{- end }}
{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/singleuser/secret.yaml b/applications/jupyterhub/deploy/templates/singleuser/secret.yaml
new file mode 100644
index 000000000..e6eab9bd0
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/singleuser/secret.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.apps.jupyterhub.singleuser.extraFiles }}
+kind: Secret
+apiVersion: v1
+metadata:
+ name: {{ include "jupyterhub.singleuser.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+type: Opaque
+{{- with include "jupyterhub.extraFiles.data" .Values.apps.jupyterhub.singleuser.extraFiles }}
+data:
+ {{- . | nindent 2 }}
+{{- end }}
+{{- with include "jupyterhub.extraFiles.stringData" .Values.apps.jupyterhub.singleuser.extraFiles }}
+stringData:
+ {{- . | nindent 2 }}
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/values-test.yaml b/applications/jupyterhub/deploy/values-test.yaml
new file mode 100644
index 000000000..653cae1e3
--- /dev/null
+++ b/applications/jupyterhub/deploy/values-test.yaml
@@ -0,0 +1,5 @@
+harness:
+ jupyterhub:
+ prepull:
+ - cloudharness-base
+
diff --git a/applications/jupyterhub/deploy/values.schema.yaml b/applications/jupyterhub/deploy/values.schema.yaml
new file mode 100644
index 000000000..69c13a83c
--- /dev/null
+++ b/applications/jupyterhub/deploy/values.schema.yaml
@@ -0,0 +1,3014 @@
+# This schema (a jsonschema in YAML format) is used to generate
+# values.schema.json which is packaged with the Helm chart for client side
+# validation by helm of values before template rendering.
+#
+# This schema is also used by our documentation system to build the
+# configuration reference section based on the description fields. See
+# docs/source/conf.py for that logic!
+#
+# We look to document everything we have default values for in values.yaml, but
+# we don't look to enforce the perfect validation logic within this file.
+#
+# ref: https://json-schema.org/learn/getting-started-step-by-step.html
+#
+$schema: http://json-schema.org/draft-07/schema#
+type: object
+additionalProperties: false
+required:
+ - imagePullSecrets
+ - hub
+ - proxy
+ - singleuser
+ - ingress
+ - prePuller
+ - custom
+ - cull
+ - debug
+ - rbac
+ - global
+properties:
+ enabled:
+ type: [boolean, "null"]
+ description: |
+ `enabled` is ignored by the jupyterhub chart itself, but a chart depending
+ on the jupyterhub chart conditionally can make use of this config option as
+ the condition.
+ fullnameOverride:
+ type: [string, "null"]
+ description: |
+ fullnameOverride and nameOverride allow you to adjust how the resources
+ part of the Helm chart are named.
+
+ Name format | Resource types | fullnameOverride | nameOverride | Note
+ ------------------------- | -------------- | ---------------- | ------------ | -
+ component | namespaced | `""` | * | Default
+ release-component | cluster wide | `""` | * | Default
+ fullname-component | * | str | * | -
+ release-component | * | null | `""` | -
+ release-(name-)component | * | null | str | omitted if contained in release
+ release-(chart-)component | * | null | null | omitted if contained in release
+
+ ```{admonition} Warning!
+ :class: warning
+ Changing fullnameOverride or nameOverride after the initial installation
+ of the chart isn't supported. Changing their values likely leads to a
+ reset of non-external JupyterHub databases, abandonment of users' storage,
+ and severed couplings to currently running user pods.
+ ```
+
+ If you are a developer of a chart depending on this chart, you should
+ avoid hardcoding names. If you want to reference the name of a resource in
+ this chart from a parent helm chart's template, you can make use of the
+ global named templates instead.
+
+ ```yaml
+ # some pod definition of a parent chart helm template
+ schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
+ ```
+
+ To access them from a container, you can also rely on the hub ConfigMap
+ that contains entries of all the resource names.
+
+ ```yaml
+ # some container definition in a parent chart helm template
+ env:
+ - name: SCHEDULER_NAME
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "jupyterhub.user-scheduler.fullname" . }}
+ key: user-scheduler
+ ```
+
+ nameOverride:
+ type: [string, "null"]
+ description: |
+ See the documentation under [`fullnameOverride`](schema_fullnameOverride).
+
+ imagePullSecret:
+ type: object
+ required: [create]
+ if:
+ properties:
+ create:
+ const: true
+ then:
+ additionalProperties: false
+ required: [registry, username, password]
+ description: |
+ This is configuration to create a k8s Secret resource of `type:
+ kubernetes.io/dockerconfigjson`, with credentials to pull images from a
+ private image registry. If you opt to do so, it will be available for use
+ by all pods in their respective `spec.imagePullSecrets` alongside other
+ k8s Secrets defined in `imagePullSecrets` or the pod respective
+ `...image.pullSecrets` configuration.
+
+ In other words, using this configuration option can automate both the
+ otherwise manual creation of a k8s Secret and the otherwise manual
+ configuration to reference this k8s Secret in all the pods of the Helm
+ chart.
+
+ ```sh
+ # you won't need to create a k8s Secret manually...
+ kubectl create secret docker-registry image-pull-secret \
+ --docker-server= \
+ --docker-username= \
+ --docker-email= \
+ --docker-password=
+ ```
+
+ If you just want to let all Pods reference an existing secret, use the
+ [`imagePullSecrets`](schema_imagePullSecrets) configuration instead.
+ properties:
+ create:
+ type: boolean
+ description: |
+ Toggle the creation of the k8s Secret with provided credentials to
+ access a private image registry.
+ automaticReferenceInjection:
+ type: boolean
+ description: |
+ Toggle the automatic reference injection of the created Secret to all
+ pods' `spec.imagePullSecrets` configuration.
+ registry:
+ type: string
+ description: |
+ Name of the private registry you want to create a credential set for.
+ It will default to Docker Hub's image registry.
+
+ Examples:
+ - https://index.docker.io/v1/
+ - quay.io
+ - eu.gcr.io
+ - alexmorreale.privatereg.net
+ username:
+ type: string
+ description: |
+ Name of the user you want to use to connect to your private registry.
+
+ For external gcr.io, you will use the `_json_key`.
+
+ Examples:
+ - alexmorreale
+ - alex@pfc.com
+ - _json_key
+ password:
+ type: string
+ description: |
+ Password for the private image registry's user.
+
+ Examples:
+ - plaintextpassword
+ - abc123SECRETzyx098
+
+ For gcr.io registries the password will be a big JSON blob for a
+ Google cloud service account, it should look something like below.
+
+ ```yaml
+ password: |-
+ {
+ "type": "service_account",
+ "project_id": "jupyter-se",
+ "private_key_id": "f2ba09118a8d3123b3321bd9a7d6d0d9dc6fdb85",
+ ...
+ }
+ ```
+ email:
+ type: [string, "null"]
+ description: |
+ Specification of an email is most often not required, but it is
+ supported.
+
+ imagePullSecrets:
+ type: array
+ description: |
+ Chart wide configuration to _append_ k8s Secret references to all its
+ pod's `spec.imagePullSecrets` configuration.
+
+ This will not override or get overridden by pod specific configuration,
+ but instead augment the pod specific configuration.
+
+ You can use both the k8s native syntax, where each list element is like
+ `{"name": "my-secret-name"}`, or you can let list elements be strings
+ naming the secrets directly.
+
+ hub:
+ type: object
+ additionalProperties: false
+ required: [baseUrl]
+ properties:
+ revisionHistoryLimit: &revisionHistoryLimit
+ type: [integer, "null"]
+ minimum: 0
+ description: |
+ Configures the resource's `spec.revisionHistoryLimit`. This is
+ available for Deployment, StatefulSet, and DaemonSet resources.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#revision-history-limit)
+ for more info.
+ config:
+ type: object
+ additionalProperties: true
+ description: |
+ JupyterHub and its components (authenticators, spawners, etc), are
+ Python classes that expose its configuration through
+ [_traitlets_](https://traitlets.readthedocs.io/en/stable/). With this
+ Helm chart configuration (`hub.config`), you can directly configure
+ the Python classes through _static_ YAML values. To _dynamically_ set
+ values, you need to use [`hub.extraConfig`](schema_hub.extraConfig)
+ instead.
+
+ ```{admonition} Currently intended only for auth config
+ :class: warning
+ This config _currently_ (0.11.0) only influences the software in the
+ `hub` Pod, but some Helm chart config options such as
+ [`hub.baseUrl`](schema_hub.baseUrl) is used to set
+ `JupyterHub.base_url` in the `hub` Pod _and_ influence how other Helm
+ templates are rendered.
+
+ As we have not yet mapped out all the potential configuration
+ conflicts except for the authentication related configuration options,
+ please accept that using it for something else at this point can lead
+ to issues.
+ ```
+
+ __Example__
+
+ If you inspect documentation or some `jupyterhub_config.py` to contain
+ the following section:
+
+ ```python
+ c.JupyterHub.admin_access = true
+ c.JupyterHub.admin_users = ["jovyan1", "jovyan2"]
+ c.KubeSpawner.k8s_api_request_timeout = 10
+ c.GitHubOAuthenticator.allowed_organizations = ["jupyterhub"]
+ ```
+
+ Then, you would be able to represent it with this configuration like:
+
+ ```yaml
+ hub:
+ config:
+ JupyterHub:
+ admin_access: true
+ admin_users:
+ - jovyan1
+ - jovyan2
+ KubeSpawner:
+ k8s_api_request_timeout: 10
+ GitHubOAuthenticator:
+ allowed_organizations:
+ - jupyterhub
+ ```
+
+ ```{admonition} YAML limitations
+ :class: tip
+ You can't represent Python `Bytes` or `Set` objects in YAML directly.
+ ```
+
+ ```{admonition} Helm value merging
+ :class: tip
+ `helm` merges a Helm chart's default values with values passed with
+ the `--values` or `-f` flag. During merging, lists are replaced while
+ dictionaries are updated.
+ ```
+ extraFiles: &extraFiles
+ type: object
+ additionalProperties: false
+ description: |
+ A dictionary with extra files to be injected into the pod's container
+ on startup. This can for example be used to inject: configuration
+ files, custom user interface templates, images, and more.
+
+ ```yaml
+ # NOTE: "hub" is used in this example, but the configuration is the
+ # same for "singleuser".
+ hub:
+ extraFiles:
+ # The file key is just a reference that doesn't influence the
+ # actual file name.
+ :
+ # mountPath is required and must be the absolute file path.
+ mountPath:
+
+ # Choose one out of the three ways to represent the actual file
+ # content: data, stringData, or binaryData.
+ #
+ # data should be set to a mapping (dictionary). It will in the
+ # end be rendered to either YAML, JSON, or TOML based on the
+ # filename extension that are required to be either .yaml, .yml,
+ # .json, or .toml.
+ #
+ # If your content is YAML, JSON, or TOML, it can make sense to
+ # use data to represent it over stringData as data can be merged
+ # instead of replaced if set partially from separate Helm
+ # configuration files.
+ #
+ # Both stringData and binaryData should be set to a string
+ # representing the content, where binaryData should be the
+ # base64 encoding of the actual file content.
+ #
+ data:
+ myConfig:
+ myMap:
+ number: 123
+ string: "hi"
+ myList:
+ - 1
+ - 2
+ stringData: |
+ hello world!
+ binaryData: aGVsbG8gd29ybGQhCg==
+
+ # mode is by default 0644 and you can optionally override it
+ # either by octal notation (example: 0400) or decimal notation
+ # (example: 256).
+ mode:
+ ```
+
+ **Using --set-file**
+
+ To avoid embedding entire files in the Helm chart configuration, you
+ can use the `--set-file` flag during `helm upgrade` to set the
+ stringData or binaryData field.
+
+ ```yaml
+ hub:
+ extraFiles:
+ my_image:
+ mountPath: /usr/local/share/jupyterhub/static/my_image.png
+
+ # Files in /usr/local/etc/jupyterhub/jupyterhub_config.d are
+ # automatically loaded in alphabetical order of the final file
+ # name when JupyterHub starts.
+ my_config:
+ mountPath: /usr/local/etc/jupyterhub/jupyterhub_config.d/my_jupyterhub_config.py
+ ```
+
+ ```bash
+ # --set-file expects a text based file, so you need to base64 encode
+ # it manually first.
+ base64 my_image.png > my_image.png.b64
+
+ helm upgrade <...> \
+ --set-file hub.extraFiles.my_image.binaryData=./my_image.png.b64 \
+ --set-file hub.extraFiles.my_config.stringData=./my_jupyterhub_config.py
+ ```
+
+ **Common uses**
+
+ 1. **JupyterHub template customization**
+
+ You can replace the default JupyterHub user interface templates in
+ the hub pod by injecting new ones to
+ `/usr/local/share/jupyterhub/templates`. These can in turn
+ reference custom images injected to
+ `/usr/local/share/jupyterhub/static`.
+
+ 1. **JupyterHub standalone file config**
+
+ Instead of embedding JupyterHub python configuration as a string
+ within a YAML file through
+ [`hub.extraConfig`](schema_hub.extraConfig), you can inject a
+ standalone .py file into
+ `/usr/local/etc/jupyterhub/jupyterhub_config.d` that is
+ automatically loaded.
+
+ 1. **Flexible configuration**
+
+ By injecting files, you don't have to embed them in a docker image
+ that you have to rebuild.
+
+ If your configuration file is a YAML/JSON/TOML file, you can also
+ use `data` instead of `stringData` which allows you to set various
+ configuration in separate Helm config files. This can be useful to
+ help dependent charts override only some configuration part of the
+ file, or to allow for the configuration be set through multiple
+ Helm configuration files.
+
+ **Limitations**
+
+ 1. File size
+
+ The files in `hub.extraFiles` and `singleuser.extraFiles` are
+ respectively stored in their own k8s Secret resource. As k8s
+ Secrets are limited, typically to 1MB, you will be limited to a
+ total file size of less than 1MB as there is also base64 encoding
+ that takes place reducing available capacity to 75%.
+
+ 2. File updates
+
+ The files that are mounted are only set during container startup.
+ This is [because we use
+ `subPath`](https://kubernetes.io/docs/concepts/storage/volumes/#secret)
+ as is required to avoid replacing the content of the entire
+ directory we mount in.
+ patternProperties:
+ ".*":
+ type: object
+ additionalProperties: false
+ required: [mountPath]
+ oneOf:
+ - required: [data]
+ - required: [stringData]
+ - required: [binaryData]
+ properties:
+ mountPath:
+ type: string
+ data:
+ type: object
+ additionalProperties: true
+ stringData:
+ type: string
+ binaryData:
+ type: string
+ mode:
+ type: number
+ baseUrl:
+ type: string
+ description: |
+ This is the equivalent of c.JupyterHub.base_url, but it is also needed
+ by the Helm chart in general. So, instead of setting
+ c.JupyterHub.base_url, use this configuration.
+ command:
+ type: array
+ description: |
+ A list of strings to be used to replace the JupyterHub image's
+ `ENTRYPOINT` entry. Note that in k8s lingo, the Dockerfile's
+ `ENTRYPOINT` is called `command`. The list of strings will be expanded
+ with Helm's template function `tpl` which can render Helm template
+ logic inside curly braces (`{{... }}`).
+
+ This could be useful to wrap the invocation of JupyterHub itself in
+ some custom way.
+
+ For more details, see the [Kubernetes
+ documentation](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/).
+ args:
+ type: array
+ description: |
+ A list of strings to be used to replace the JupyterHub image's `CMD`
+ entry as well as the Helm chart's default way to start JupyterHub.
+ Note that in k8s lingo, the Dockerfile's `CMD` is called `args`. The
+ list of strings will be expanded with Helm's template function `tpl`
+ which can render Helm template logic inside curly braces (`{{... }}`).
+
+ ```{warning}
+ By replacing the entire configuration file, which is mounted to
+ `/usr/local/etc/jupyterhub/jupyterhub_config.py` by the Helm chart,
+ instead of appending to it with `hub.extraConfig`, you expose your
+ deployment for issues stemming from getting out of sync with the Helm
+ chart's config file.
+
+ These kinds of issues will be significantly harder to debug and
+ diagnose, and can due to this cause a lot of time expenditure
+ for both the community maintaining the Helm chart as well as yourself,
+ even if this wasn't the reason for the issue.
+
+ Due to this, we ask that you do your _absolute best_ to avoid replacing
+ the default provided `jupyterhub_config.py` file. It can often be
+ possible. For example, if your goal is to have a dedicated .py file
+ for more extensive additions that you can syntax highlight and such
+ and feel limited by passing code in `hub.extraConfig` which is part of
+ a YAML file, you can use [this
+ trick](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues/1580#issuecomment-707776237)
+ instead.
+ ```
+
+ ```yaml
+ hub:
+ args:
+ - "jupyterhub"
+ - "--config"
+ - "/usr/local/etc/jupyterhub/jupyterhub_config.py"
+ - "--debug"
+ - "--upgrade-db"
+ ```
+
+ For more details, see the [Kubernetes
+ documentation](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/).
+ cookieSecret:
+ type: [string, "null"]
+ description: |
+ ```{note}
+ As of version 1.0.0 this will automatically be generated and there is
+ no need to set it manually.
+
+ If you wish to reset a generated key, you can use `kubectl edit` on
+ the k8s Secret typically named `hub` and remove the
+ `hub.config.JupyterHub.cookie_secret` entry in the k8s Secret, then
+ perform a new `helm upgrade`.
+ ```
+
+ A 32-byte cryptographically secure randomly generated string used to sign values of
+ secure cookies set by the hub. If unset, jupyterhub will generate one on startup and
+ save it in the file `jupyterhub_cookie_secret` in the `/srv/jupyterhub` directory of
+ the hub container. A value set here will make JupyterHub overwrite any previous file.
+
+ You do not need to set this at all if you are using the default configuration for
+ storing databases - sqlite on a persistent volume (with `hub.db.type` set to the
+ default `sqlite-pvc`). If you are using an external database, then you must set this
+ value explicitly - or your users will keep getting logged out each time the hub pod
+ restarts.
+
+        Changing this value will cause all user logins to be invalidated. If this secret leaks,
+        *immediately* change it to something else, or user data can be compromised.
+
+ ```sh
+ # to generate a value, run
+ openssl rand -hex 32
+ ```
+ image: &image-spec
+ type: object
+ additionalProperties: false
+ required: [name, tag]
+ description: |
+ Set custom image name, tag, pullPolicy, or pullSecrets for the pod.
+ properties:
+ name:
+ type: string
+ description: |
+ The name of the image, without the tag.
+
+ ```
+ # example name
+ gcr.io/my-project/my-image
+ ```
+ tag:
+ type: string
+ description: |
+ The tag of the image to pull. This is the value following `:` in
+ complete image specifications.
+
+ ```
+ # example tags
+ v1.11.1
+ zhy270a
+ ```
+ pullPolicy:
+ enum: [null, "", IfNotPresent, Always, Never]
+ description: |
+ Configures the Pod's `spec.imagePullPolicy`.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)
+ for more info.
+ pullSecrets:
+ type: array
+ description: |
+ A list of references to existing Kubernetes Secrets with
+ credentials to pull the image.
+
+ This Pod's final `imagePullSecrets` k8s specification will be a
+ combination of:
+
+ 1. This list of k8s Secrets, specific for this pod.
+ 2. The list of k8s Secrets, for use by all pods in the Helm chart,
+ declared in this Helm charts configuration called
+ `imagePullSecrets`.
+ 3. A k8s Secret, for use by all pods in the Helm chart, if
+ conditionally created from image registry credentials provided
+ under `imagePullSecret` if `imagePullSecret.create` is set to
+ true.
+
+ ```yaml
+ # example - k8s native syntax
+ pullSecrets:
+ - name: my-k8s-secret-with-image-registry-credentials
+
+ # example - simplified syntax
+ pullSecrets:
+ - my-k8s-secret-with-image-registry-credentials
+ ```
+ networkPolicy: &networkPolicy-spec
+ type: object
+ additionalProperties: false
+ description: |
+ This configuration regards the creation and configuration of a k8s
+ _NetworkPolicy resource_.
+ properties:
+ enabled:
+ type: boolean
+ description: |
+ Toggle the creation of the NetworkPolicy resource targeting this
+ pod, and by doing so, restricting its communication to only what
+ is explicitly allowed in the NetworkPolicy.
+ ingress:
+ type: array
+ description: |
+ Additional ingress rules to add besides those that are required
+ for core functionality.
+ egress:
+ type: array
+ description: |
+ Additional egress rules to add besides those that are required for
+ core functionality and those added via
+ [`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules).
+
+ ```{versionchanged} 2.0.0
+ The default value changed from providing one very permissive rule
+ allowing all egress to providing no rule. The permissive rule is
+ still provided via
+ [`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules)
+ set to true though.
+ ```
+
+ As an example, below is a configuration that disables the more
+ broadly permissive `.privateIPs` egress allow rule for the hub
+ pod, and instead provides tightly scoped permissions to access a
+ specific k8s local service as identified by pod labels.
+
+ ```yaml
+ hub:
+ networkPolicy:
+ egressAllowRules:
+ privateIPs: false
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ app: my-k8s-local-service
+ ports:
+ - protocol: TCP
+ port: 5978
+ ```
+ egressAllowRules:
+ type: object
+ additionalProperties: false
+ description: |
+ This is a set of predefined rules that when enabled will be added
+ to the NetworkPolicy list of egress rules.
+
+ The resulting egress rules will be a composition of:
+ - rules specific for the respective pod(s) function within the
+ Helm chart
+ - rules based on enabled `egressAllowRules` flags
+ - rules explicitly specified by the user
+
+ ```{note}
+ Each flag under this configuration will not render into a
+ dedicated rule in the NetworkPolicy resource, but instead combine
+ with the other flags to a reduced set of rules to avoid a
+ performance penalty.
+ ```
+
+ ```{versionadded} 2.0.0
+ ```
+ properties:
+ cloudMetadataServer:
+ type: boolean
+ description: |
+ Defaults to `false` for singleuser servers, but to `true` for
+ all other network policies.
+
+ When enabled this rule allows the respective pod(s) to
+ establish outbound connections to the cloud metadata server.
+
+ Note that the `nonPrivateIPs` rule is allowing all non Private
+ IP ranges but makes an exception for the cloud metadata
+ server, leaving this as the definitive configuration to allow
+ access to the cloud metadata server.
+
+ ```{versionchanged} 3.0.0
+ This configuration is not allowed to be configured true at the
+ same time as
+ [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)
+ to avoid an ambiguous configuration.
+ ```
+ dnsPortsCloudMetadataServer:
+ type: boolean
+ description: |
+ Defaults to `true` for all network policies.
+
+ When enabled this rule allows the respective pod(s) to
+ establish outbound connections to the cloud metadata server
+ via port 53.
+
+ Relying on this rule for the singleuser config should go hand
+ in hand with disabling
+ [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)
+ to avoid an ambiguous configuration.
+
+ Known situations when this rule can be relevant:
+
+ - In GKE clusters with Cloud DNS that is reached at the
+ cloud metadata server's non-private IP.
+
+ ```{note}
+ This chart doesn't know how to identify the DNS server that
+ pods will rely on due to variations between how k8s clusters
+ have been setup. Due to that, multiple rules are enabled by
+ default to ensure DNS connectivity.
+ ```
+
+ ```{versionadded} 3.0.0
+ ```
+ dnsPortsKubeSystemNamespace:
+ type: boolean
+ description: |
+ Defaults to `true` for all network policies.
+
+ When enabled this rule allows the respective pod(s) to
+ establish outbound connections to pods in the kube-system
+ namespace via port 53.
+
+ Known situations when this rule can be relevant:
+
+ - GKE, EKS, AKS, and other clusters relying directly on
+ `kube-dns` or `coredns` pods in the `kube-system` namespace.
+
+ ```{note}
+ This chart doesn't know how to identify the DNS server that
+ pods will rely on due to variations between how k8s clusters
+ have been setup. Due to that, multiple rules are enabled by
+ default to ensure DNS connectivity.
+ ```
+
+ ```{versionadded} 3.0.0
+ ```
+ dnsPortsPrivateIPs:
+ type: boolean
+ description: |
+ Defaults to `true` for all network policies.
+
+ When enabled this rule allows the respective pod(s) to
+ establish outbound connections to private IPs via port 53.
+
+ Known situations when this rule can be relevant:
+
+              - GKE clusters relying on a DNS server indirectly via a node
+ local DNS cache at an unknown private IP.
+
+ ```{note}
+ This chart doesn't know how to identify the DNS server that
+ pods will rely on due to variations between how k8s clusters
+ have been setup. Due to that, multiple rules are enabled by
+              default to ensure DNS connectivity.
+              ```
+
+ ```{warning}
+ This rule is not expected to work in clusters relying on
+ Cilium to enforce the NetworkPolicy rules (includes GKE
+ clusters with Dataplane v2), this is due to a [known
+ limitation](https://github.com/cilium/cilium/issues/9209).
+ ```
+ nonPrivateIPs:
+ type: boolean
+ description: |
+ Defaults to `true` for all network policies.
+
+ When enabled this rule allows the respective pod(s) to
+ establish outbound connections to the non-private IP ranges
+ with the exception of the cloud metadata server. This means
+ respective pod(s) can establish connections to the internet
+ but not (say) an unsecured prometheus server running in the
+ same cluster.
+ privateIPs:
+ type: boolean
+ description: |
+ Defaults to `false` for singleuser servers, but to `true` for
+ all other network policies.
+
+ Private IPs refer to the IP ranges `10.0.0.0/8`,
+ `172.16.0.0/12`, `192.168.0.0/16`.
+
+ When enabled this rule allows the respective pod(s) to
+ establish outbound connections to the internal k8s cluster.
+                This means the pod(s) can reach cluster-internal services,
+                such as (say) an unsecured prometheus server running in the
+                same cluster.
+
+ Since not all workloads in the k8s cluster may have
+ NetworkPolicies setup to restrict their incoming connections,
+ having this set to false can be a good defense against
+ malicious intent from someone in control of software in these
+ pods.
+
+ If possible, try to avoid setting this to true as it gives
+ broad permissions that could be specified more directly via
+ the [`.egress`](schema_singleuser.networkPolicy.egress).
+
+ ```{warning}
+ This rule is not expected to work in clusters relying on
+ Cilium to enforce the NetworkPolicy rules (includes GKE
+ clusters with Dataplane v2), this is due to a [known
+ limitation](https://github.com/cilium/cilium/issues/9209).
+ ```
+ interNamespaceAccessLabels:
+ enum: [accept, ignore]
+ description: |
+ This configuration option determines if both namespaces and pods
+ in other namespaces, that have specific access labels, should be
+ accepted to allow ingress (set to `accept`), or, if the labels are
+ to be ignored when applied outside the local namespace (set to
+ `ignore`).
+
+ The available access labels for respective NetworkPolicy resources
+ are:
+
+ - `hub.jupyter.org/network-access-hub: "true"` (hub)
+ - `hub.jupyter.org/network-access-proxy-http: "true"` (proxy.chp, proxy.traefik)
+ - `hub.jupyter.org/network-access-proxy-api: "true"` (proxy.chp)
+ - `hub.jupyter.org/network-access-singleuser: "true"` (singleuser)
+ allowedIngressPorts:
+ type: array
+ description: |
+ A rule to allow ingress on these ports will be added no matter
+ what the origin of the request is. The default setting for
+ `proxy.chp` and `proxy.traefik`'s networkPolicy configuration is
+ `[http, https]`, while it is `[]` for other networkPolicies.
+
+ Note that these port names or numbers target a Pod's port name or
+ number, not a k8s Service's port name or number.
+ db:
+ type: object
+ additionalProperties: false
+ properties:
+ type:
+ enum: [sqlite-pvc, sqlite-memory, mysql, postgres, other]
+ description: |
+ Type of database backend to use for the hub database.
+
+ The Hub requires a persistent database to function, and this lets you specify
+ where it should be stored.
+
+ The various options are:
+
+ 1. **sqlite-pvc**
+
+ Use an `sqlite` database kept on a persistent volume attached to the hub.
+
+ By default, this disk is created by the cloud provider using
+ *dynamic provisioning* configured by a [storage
+ class](https://kubernetes.io/docs/concepts/storage/storage-classes/).
+ You can customize how this disk is created / attached by
+ setting various properties under `hub.db.pvc`.
+
+ This is the default setting, and should work well for most cloud provider
+ deployments.
+
+ 2. **sqlite-memory**
+
+ Use an in-memory `sqlite` database. This should only be used for testing,
+ since the database is erased whenever the hub pod restarts - causing the hub
+ to lose all memory of users who had logged in before.
+
+ When using this for testing, make sure you delete all other objects that the
+ hub has created (such as user pods, user PVCs, etc) every time the hub restarts.
+ Otherwise you might run into errors about duplicate resources.
+
+ 3. **mysql**
+
+ Use an externally hosted mysql database.
+
+ You have to specify an sqlalchemy connection string for the mysql database you
+ want to connect to in `hub.db.url` if using this option.
+
+ The general format of the connection string is:
+ ```
+            mysql+pymysql://<db-username>:<db-password>@<db-hostname>:<db-port>/<db-name>
+ ```
+
+ The user specified in the connection string must have the rights to create
+ tables in the database specified.
+
+ 4. **postgres**
+
+ Use an externally hosted postgres database.
+
+ You have to specify an sqlalchemy connection string for the postgres database you
+ want to connect to in `hub.db.url` if using this option.
+
+ The general format of the connection string is:
+ ```
+            postgresql+psycopg2://<db-username>:<db-password>@<db-hostname>:<db-port>/<db-name>
+ ```
+
+ The user specified in the connection string must have the rights to create
+ tables in the database specified.
+
+ 5. **other**
+
+ Use an externally hosted database of some kind other than mysql
+ or postgres.
+
+ When using _other_, the database password must be passed as
+ part of [hub.db.url](schema_hub.db.url) as
+ [hub.db.password](schema_hub.db.password) will be ignored.
+ pvc:
+ type: object
+ additionalProperties: false
+ required: [storage]
+ description: |
+ Customize the Persistent Volume Claim used when `hub.db.type` is `sqlite-pvc`.
+ properties:
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: &labels-and-annotations-patternProperties
+ ".*":
+ type: string
+ description: |
+ Annotations to apply to the PVC containing the sqlite database.
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+ for more details about annotations.
+ selector:
+ type: object
+ additionalProperties: true
+ description: |
+ Label selectors to set for the PVC containing the sqlite database.
+
+ Useful when you are using a specific PV, and want to bind to
+ that and only that.
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
+ for more details about using a label selector for what PV to
+ bind to.
+ storage:
+ type: string
+ description: |
+ Size of disk to request for the database disk.
+ accessModes:
+ type: array
+ items:
+ type: [string, "null"]
+ description: |
+ AccessModes contains the desired access modes the volume
+ should have. See [the k8s
+ documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1)
+ for more information.
+ storageClassName:
+ type: [string, "null"]
+ description: |
+ Name of the StorageClass required by the claim.
+
+            If this is set to a blank string, the PVC's storageClassName
+            field will be set to a blank string, while if it is null, the
+            field will not be set at all.
+ subPath:
+ type: [string, "null"]
+ description: |
+ Path within the volume from which the container's volume
+ should be mounted. Defaults to "" (volume's root).
+ upgrade:
+ type: [boolean, "null"]
+ description: |
+ Users with external databases need to opt-in for upgrades of the
+ JupyterHub specific database schema if needed as part of a
+ JupyterHub version upgrade.
+ url:
+ type: [string, "null"]
+ description: |
+ Connection string when `hub.db.type` is mysql or postgres.
+
+ See documentation for `hub.db.type` for more details on the format of this property.
+ password:
+ type: [string, "null"]
+ description: |
+ Password for the database when `hub.db.type` is mysql or postgres.
+ labels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra labels to add to the hub pod.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ to learn more about labels.
+ initContainers:
+ type: array
+ description: |
+        List of initContainers to be run with the hub pod. See [Kubernetes Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
+
+ ```yaml
+ hub:
+ initContainers:
+ - name: init-myservice
+ image: busybox:1.28
+ command: ['sh', '-c', 'command1']
+ - name: init-mydb
+ image: busybox:1.28
+ command: ['sh', '-c', 'command2']
+ ```
+ extraEnv:
+ type: [object, array]
+ additionalProperties: true
+ description: |
+ Extra environment variables that should be set for the hub pod.
+
+ Environment variables are usually used to:
+ - Pass parameters to some custom code in `hub.extraConfig`.
+ - Configure code running in the hub pod, such as an authenticator or
+ spawner.
+
+ String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which
+ is a part of Kubernetes.
+
+ ```yaml
+ hub:
+ extraEnv:
+ # basic notation (for literal values only)
+ MY_ENV_VARS_NAME1: "my env var value 1"
+
+ # explicit notation (the "name" field takes precedence)
+ HUB_NAMESPACE:
+ name: HUB_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+
+ # implicit notation (the "name" field is implied)
+ PREFIXED_HUB_NAMESPACE:
+ value: "my-prefix-$(HUB_NAMESPACE)"
+ SECRET_VALUE:
+ valueFrom:
+ secretKeyRef:
+ name: my-k8s-secret
+ key: password
+ ```
+
+ For more information, see the [Kubernetes EnvVar
+ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
+ extraConfig:
+ type: object
+ additionalProperties: true
+ description: |
+ Arbitrary extra python based configuration that should be in `jupyterhub_config.py`.
+
+ This is the *escape hatch* - if you want to configure JupyterHub to do something specific
+ that is not present here as an option, you can write the raw Python to do it here.
+
+ extraConfig is a *dict*, so there can be multiple configuration
+ snippets under different names. The configuration sections are run in
+ alphabetical order based on the keys.
+
+ Non-exhaustive examples of things you can do here:
+ - Subclass authenticator / spawner to do a custom thing
+        - Dynamically launch different images for different sets of users
+ - Inject an auth token from GitHub authenticator into user pod
+ - Anything else you can think of!
+
+ Since this is usually a multi-line string, you want to format it using YAML's
+ [| operator](https://yaml.org/spec/1.2.2/#23-scalars).
+
+ For example:
+
+ ```yaml
+ hub:
+ extraConfig:
+ myConfig.py: |
+ c.JupyterHub.something = 'something'
+ c.Spawner.something_else = 'something else'
+ ```
+
+ ```{note}
+ No code validation is performed until JupyterHub loads it! If you make
+ a typo here, it will probably manifest itself as the hub pod failing
+ to start up and instead entering an `Error` state or the subsequent
+ `CrashLoopBackoff` state.
+
+ To make use of your own programs linters etc, it would be useful to
+ not embed Python code inside a YAML file. To do that, consider using
+ [`hub.extraFiles`](schema_hub.extraFiles) and mounting a file to
+ `/usr/local/etc/jupyterhub/jupyterhub_config.d` in order to load your
+ extra configuration logic.
+ ```
+
+ fsGid:
+ type: [integer, "null"]
+ minimum: 0
+ # This schema entry is needed to help us print a more helpful error
+ # message in NOTES.txt if hub.fsGid is set.
+ #
+ description: |
+ ```{note}
+ Removed in version 2.0.0. Use
+ [`hub.podSecurityContext`](schema_hub.podSecurityContext) and specify
+ `fsGroup` instead.
+ ```
+ service:
+ type: object
+ additionalProperties: false
+ description: |
+ Object to configure the service the JupyterHub will be exposed on by the Kubernetes server.
+ properties:
+ type:
+ enum: [ClusterIP, NodePort, LoadBalancer, ExternalName]
+ description: |
+ The Kubernetes ServiceType to be used.
+
+ The default type is `ClusterIP`.
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)
+ to learn more about service types.
+ ports:
+ type: object
+ additionalProperties: false
+ description: |
+ Object to configure the ports the hub service will be deployed on.
+ properties:
+ nodePort:
+ type: [integer, "null"]
+ minimum: 0
+ description: |
+ The nodePort to deploy the hub service on.
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Kubernetes annotations to apply to the hub service.
+ extraPorts:
+ type: array
+ description: |
+ Extra ports to add to the Hub Service object besides `hub` / `8081`.
+ This should be an array that includes `name`, `port`, and `targetPort`.
+ See [Multi-port Services](https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services) for more details.
+ loadBalancerIP:
+ type: [string, "null"]
+ description: |
+ A public IP address the hub Kubernetes service should be exposed
+ on. To expose the hub directly is not recommended. Instead route
+ traffic through the proxy-public service towards the hub.
+
+ pdb: &pdb-spec
+ type: object
+ additionalProperties: false
+ description: |
+ Configure a PodDisruptionBudget for this Deployment.
+
+ These are disabled by default for our deployments that don't support
+ being run in parallel with multiple replicas. Only the user-scheduler
+ currently supports being run in parallel with multiple replicas. If
+ they are enabled for a Deployment with only one replica, they will
+ block `kubectl drain` of a node for example.
+
+ Note that if you aim to block scaling down a node with the
+ hub/proxy/autohttps pod that would cause disruptions of the
+ deployment, then you should instead annotate the pods of the
+ Deployment [as described
+ here](https://github.com/kubernetes/autoscaler/blob/HEAD/cluster-autoscaler/FAQ.md#what-types-of-pods-can-prevent-ca-from-removing-a-node).
+
+ "cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/)
+ for more details about disruptions.
+ properties:
+ enabled:
+ type: boolean
+ description: |
+ Decides if a PodDisruptionBudget is created targeting the
+ Deployment's pods.
+ maxUnavailable:
+ type: [integer, "null"]
+ description: |
+ The maximum number of pods that can be unavailable during
+ voluntary disruptions.
+ minAvailable:
+ type: [integer, "null"]
+ description: |
+ The minimum number of pods required to be available during
+ voluntary disruptions.
+ existingSecret:
+ type: [string, "null"]
+ description: |
+ This option allow you to provide the name of an existing k8s Secret to
+ use alongside of the chart managed k8s Secret. The content of this k8s
+ Secret will be merged with the chart managed k8s Secret, giving
+ priority to the self-managed k8s Secret.
+
+ ```{warning}
+ 1. The self managed k8s Secret must mirror the structure in the chart
+ managed secret.
+ 2. [`proxy.secretToken`](schema_proxy.secretToken) (aka.
+ `hub.config.ConfigurableHTTPProxy.auth_token`) is only read from
+ the chart managed k8s Secret.
+ ```
+ nodeSelector: &nodeSelector-spec
+ type: object
+ additionalProperties: true
+ description: |
+ An object with key value pairs representing labels. K8s Nodes are
+        required to match all these labels for this Pod to be scheduled on
+ them.
+
+ ```yaml
+ disktype: ssd
+ nodetype: awesome
+ ```
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector)
+ for more details.
+ tolerations: &tolerations-spec
+ type: array
+ description: |
+ Tolerations allow a pod to be scheduled on nodes with taints. These
+ tolerations are additional tolerations to the tolerations common to
+        all pods of their respective kind
+ ([scheduling.corePods.tolerations](schema_scheduling.corePods.tolerations),
+ [scheduling.userPods.tolerations](schema_scheduling.userPods.tolerations)).
+
+ Pass this field an array of
+ [`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#toleration-v1-core)
+ objects.
+
+ See the [Kubernetes
+ docs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)
+ for more info.
+ activeServerLimit:
+ type: [integer, "null"]
+ description: &jupyterhub-native-config-description |
+ JupyterHub native configuration, see the [JupyterHub
+ documentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html)
+ for more information.
+ allowNamedServers:
+ type: [boolean, "null"]
+ description: *jupyterhub-native-config-description
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ K8s annotations for the hub pod.
+ authenticatePrometheus:
+ type: [boolean, "null"]
+ description: *jupyterhub-native-config-description
+ concurrentSpawnLimit:
+ type: [integer, "null"]
+ description: *jupyterhub-native-config-description
+ consecutiveFailureLimit:
+ type: [integer, "null"]
+ description: *jupyterhub-native-config-description
+ podSecurityContext: &podSecurityContext-spec
+ additionalProperties: true
+ description: |
+ A k8s native specification of the pod's security context, see [the
+ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podsecuritycontext-v1-core)
+ for details.
+ containerSecurityContext: &containerSecurityContext-spec
+ type: object
+ additionalProperties: true
+ description: |
+ A k8s native specification of the container's security context, see [the
+ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core)
+ for details.
+ deploymentStrategy:
+ type: object
+ additionalProperties: false
+ properties:
+ rollingUpdate:
+ type: [string, "null"]
+ type:
+ type: [string, "null"]
+ description: |
+ JupyterHub does not support running in parallel, due to this we
+ default to using a deployment strategy of Recreate.
+ extraContainers: &extraContainers-spec
+ type: array
+ description: |
+ Additional containers for the Pod. Use a k8s native syntax.
+ extraVolumeMounts: &extraVolumeMounts-spec
+ type: array
+ description: |
+ Additional volume mounts for the Container. Use a k8s native syntax.
+ extraVolumes: &extraVolumes-spec
+ type: array
+ description: |
+ Additional volumes for the Pod. Use a k8s native syntax.
+ livenessProbe: &probe-spec
+ type: object
+ additionalProperties: true
+ required: [enabled]
+ if:
+ properties:
+ enabled:
+ const: true
+ then:
+ description: |
+ This config option is like the k8s native specification of a
+ container probe, except that it also supports an `enabled` boolean
+ flag.
+
+ See [the k8s
+ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#probe-v1-core)
+ for more details.
+ readinessProbe: *probe-spec
+ namedServerLimitPerUser:
+ type: [integer, "null"]
+ description: *jupyterhub-native-config-description
+ redirectToServer:
+ type: [boolean, "null"]
+ description: *jupyterhub-native-config-description
+ resources: &resources-spec
+ type: object
+ additionalProperties: true
+ description: |
+ A k8s native specification of resources, see [the
+ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core).
+ lifecycle: &lifecycle-spec
+ type: object
+ additionalProperties: false
+ description: |
+ A k8s native specification of lifecycle hooks on the container, see [the
+ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#lifecycle-v1-core).
+ properties:
+ postStart:
+ type: object
+ additionalProperties: true
+ preStop:
+ type: object
+ additionalProperties: true
+ services:
+ type: object
+ additionalProperties: true
+ description: |
+ This is where you register JupyterHub services. For details on how to
+ configure these services in this Helm chart just keep reading but for
+ details on services themselves instead read [JupyterHub's
+ documentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/service.html).
+
+ ```{note}
+ Only a selection of JupyterHub's configuration options that can be
+ configured for a service are documented below. All configuration set
+ here will be applied even if this Helm chart doesn't recognize it.
+ ```
+
+ JupyterHub's native configuration accepts a list of service objects,
+ this Helm chart only accept a dictionary where each key represents the
+ name of a service and the value is the actual service objects.
+
+ When configuring JupyterHub services via this Helm chart, the `name`
+ field can be omitted as it can be implied by the dictionary key.
+ Further, the `api_token` field can be omitted as it will be
+ automatically generated as of version 1.1.0 of this Helm chart.
+
+ If you have an external service that needs to access the automatically
+ generated api_token for the service, you can access it from the `hub`
+ k8s Secret part of this Helm chart under the key
+ `hub.services.my-service-config-key.apiToken`.
+
+ Here is an example configuration of two services where the first
+ explicitly sets a name and api_token, while the second omits those and
+ lets the name be implied from the key name and the api_token be
+ automatically generated.
+
+ ```yaml
+ hub:
+ services:
+ my-service-1:
+ admin: true
+ name: my-explicitly-set-service-name
+ api_token: my-explicitly-set-api_token
+
+ # the name of the following service will be my-service-2
+ # the api_token of the following service will be generated
+ my-service-2: {}
+ ```
+
+ If you develop a Helm chart depending on the JupyterHub Helm chart and
+ want to let some Pod's environment variable be populated with the
+ api_token of a service registered like above, then do something along
+ these lines.
+
+ ```yaml
+ # ... container specification of a pod ...
+ env:
+ - name: MY_SERVICE_1_API_TOKEN
+ valueFrom:
+ secretKeyRef:
+ # Don't hardcode the name, use the globally accessible
+ # named templates part of the JupyterHub Helm chart.
+ name: {{ include "jupyterhub.hub.fullname" . }}
+ # Note below the use of the configuration key my-service-1
+ # rather than the explicitly set service name.
+ key: hub.services.my-service-1.apiToken
+ ```
+ properties:
+ name:
+ type: string
+ description: |
+ The name can be implied via the key name under which this
+ service is configured, and is due to that allowed to be
+ omitted in this Helm chart configuration of JupyterHub.
+ admin:
+ type: boolean
+ command:
+ type: [string, array]
+ url:
+ type: string
+ api_token:
+ type: [string, "null"]
+ description: |
+ The api_token will be automatically generated if not
+            explicitly set. It will also be exposed via a k8s Secret
+ part of this Helm chart under a specific key.
+
+ See the documentation under
+ [`hub.services`](schema_hub.services) for details about this.
+ apiToken:
+ type: [string, "null"]
+ description: |
+ An alias for api_token provided for backward compatibility by
+ the JupyterHub Helm chart that will be transformed to
+ api_token.
+ loadRoles:
+ type: object
+ additionalProperties: true
+ description: |
+ This is where you should define JupyterHub roles and apply them to
+ JupyterHub users, groups, and services to grant them additional
+ permissions as defined in JupyterHub's RBAC system.
+
+ Complement this documentation with [JupyterHub's
+ documentation](https://jupyterhub.readthedocs.io/en/stable/rbac/roles.html#defining-roles)
+ about `load_roles`.
+
+ Note that while JupyterHub's native configuration `load_roles` accepts
+ a list of role objects, this Helm chart only accepts a dictionary where
+ each key represents the name of a role and the value is the actual
+ role object.
+
+ ```yaml
+ hub:
+ loadRoles:
+ teacher:
+ description: Access to users' information and group membership
+
+ # this role provides permissions to...
+ scopes: [users, groups]
+
+ # this role will be assigned to...
+ users: [erik]
+ services: [grading-service]
+ groups: [teachers]
+ ```
+
+ When configuring JupyterHub roles via this Helm chart, the `name`
+ field can be omitted as it can be implied by the dictionary key.
+ shutdownOnLogout:
+ type: [boolean, "null"]
+ description: *jupyterhub-native-config-description
+ templatePaths:
+ type: array
+ description: *jupyterhub-native-config-description
+ templateVars:
+ type: object
+ additionalProperties: true
+ description: *jupyterhub-native-config-description
+ serviceAccount: &serviceAccount
+ type: object
+ required: [create]
+ additionalProperties: false
+ description: |
+ Configuration for a k8s ServiceAccount dedicated for use by the
+ specific pod which this configuration is nested under.
+ properties:
+ create:
+ type: boolean
+ description: |
+ Whether or not to create the `ServiceAccount` resource.
+ name:
+ type: ["string", "null"]
+ description: |
+ This configuration serves multiple purposes:
+
+ - It will be the `serviceAccountName` referenced by related Pods.
+ - If `create` is set, the created ServiceAccount resource will be named like this.
+ - If [`rbac.create`](schema_rbac.create) is set, the associated (Cluster)RoleBindings will bind to this name.
+
+ If not explicitly provided, a default name will be used.
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Kubernetes annotations to apply to the k8s ServiceAccount.
+ extraPodSpec: &extraPodSpec-spec
+ type: object
+ additionalProperties: true
+ description: |
+ Arbitrary extra k8s pod specification as a YAML object. The default
+ value of this setting is an empty object, i.e. no extra configuration.
+ The value of this property is augmented to the pod specification as-is.
+
+ This is a powerful tool for expert k8s administrators with advanced
+ configuration requirements. This setting should only be used for
+ configuration that cannot be accomplished through the other settings.
+ Misusing this setting can break your deployment and/or compromise
+ your system security.
+
+ This is one of four related settings for inserting arbitrary pod
+ specification:
+
+ 1. hub.extraPodSpec
+ 2. proxy.chp.extraPodSpec
+ 3. proxy.traefik.extraPodSpec
+ 4. scheduling.userScheduler.extraPodSpec
+
+ One real-world use of these settings is to enable host networking. For
+ example, to configure host networking for the hub pod, add the
+ following to your helm configuration values:
+
+ ```yaml
+ hub:
+ extraPodSpec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ ```
+
+ Likewise, to configure host networking for the proxy pod, add the
+ following:
+
+ ```yaml
+ proxy:
+ chp:
+ extraPodSpec:
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
+ ```
+
+ N.B. Host networking has special security implications and can easily
+ break your deployment. This is an example—not an endorsement.
+
+ See [PodSpec](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec)
+ for the latest pod resource specification.
+
+ proxy:
+ type: object
+ additionalProperties: false
+ properties:
+ chp:
+ type: object
+ additionalProperties: false
+ description: |
+ Configure the configurable-http-proxy (chp) pod managed by jupyterhub to route traffic
+ both to itself and to user pods.
+ properties:
+ revisionHistoryLimit: *revisionHistoryLimit
+ networkPolicy: *networkPolicy-spec
+ extraCommandLineFlags:
+ type: array
+ description: |
+ A list of strings to be added as command line options when
+ starting
+ [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy#command-line-options)
+ that will be expanded with Helm's template function `tpl` which
+ can render Helm template logic inside curly braces (`{{ ... }}`).
+
+ ```yaml
+ proxy:
+ chp:
+ extraCommandLineFlags:
+ - "--auto-rewrite"
+ - "--custom-header {{ .Values.myCustomStuff }}"
+ ```
+
+ Note that these will be appended last, and if you provide the same
+            flag twice, the last flag will be used, which means you can
+ override the default flag values as well.
+ extraEnv:
+ type: [object, array]
+ additionalProperties: true
+ description: |
+ Extra environment variables that should be set for the chp pod.
+
+ Environment variables are usually used here to:
+ - override HUB_SERVICE_PORT or HUB_SERVICE_HOST default values
+ - set CONFIGPROXY_SSL_KEY_PASSPHRASE for setting passphrase of SSL keys
+
+ String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which
+ is a part of Kubernetes.
+
+ ```yaml
+ proxy:
+ chp:
+ extraEnv:
+ # basic notation (for literal values only)
+ MY_ENV_VARS_NAME1: "my env var value 1"
+
+ # explicit notation (the "name" field takes precedence)
+ CHP_NAMESPACE:
+ name: CHP_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+
+ # implicit notation (the "name" field is implied)
+ PREFIXED_CHP_NAMESPACE:
+ value: "my-prefix-$(CHP_NAMESPACE)"
+ SECRET_VALUE:
+ valueFrom:
+ secretKeyRef:
+ name: my-k8s-secret
+ key: password
+ ```
+
+ For more information, see the [Kubernetes EnvVar
+ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
+ pdb: *pdb-spec
+ nodeSelector: *nodeSelector-spec
+ tolerations: *tolerations-spec
+ containerSecurityContext: *containerSecurityContext-spec
+ image: *image-spec
+ livenessProbe: *probe-spec
+ readinessProbe: *probe-spec
+ resources: *resources-spec
+ defaultTarget:
+ type: [string, "null"]
+ description: |
+ Override the URL for the default routing target for the proxy.
+ Defaults to JupyterHub itself.
+ This will generally only have an effect while JupyterHub is not running,
+ as JupyterHub adds itself as the default target after it starts.
+ errorTarget:
+ type: [string, "null"]
+ description: |
+ Override the URL for the error target for the proxy.
+ Defaults to JupyterHub itself.
+ Useful to reduce load on the Hub
+ or produce more informative error messages than the Hub's default,
+ e.g. in highly customized deployments such as BinderHub.
+ See Configurable HTTP Proxy for details on implementing an error target.
+ extraPodSpec: *extraPodSpec-spec
+ secretToken:
+ type: [string, "null"]
+ description: |
+ ```{note}
+ As of version 1.0.0 this will automatically be generated and there is
+ no need to set it manually.
+
+ If you wish to reset a generated key, you can use `kubectl edit` on
+ the k8s Secret typically named `hub` and remove the
+ `hub.config.ConfigurableHTTPProxy.auth_token` entry in the k8s Secret,
+ then perform a new `helm upgrade`.
+ ```
+
+ A 32-byte cryptographically secure randomly generated string used to
+ secure communications between the hub pod and the proxy pod running a
+ [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy)
+ instance.
+
+ ```sh
+ # to generate a value, run
+ openssl rand -hex 32
+ ```
+
+ Changing this value will cause the proxy and hub pods to restart. It is good security
+ practice to rotate these values over time. If this secret leaks, *immediately* change
+ it to something else, or user data can be compromised.
+ service:
+ type: object
+ additionalProperties: false
+ description: |
+ Configuration of the k8s Service `proxy-public` which either will
+ point to the `autohttps` pod running Traefik for TLS termination, or
+ the `proxy` pod running ConfigurableHTTPProxy. Incoming traffic from
+ users on the internet should always go through this k8s Service.
+
+ When this service targets the `autohttps` pod which then routes to the
+ `proxy` pod, a k8s Service named `proxy-http` will be added targeting
+ the `proxy` pod and only accepting HTTP traffic on port 80.
+ properties:
+ type:
+ enum: [ClusterIP, NodePort, LoadBalancer, ExternalName]
+ description: |
+ Default `LoadBalancer`.
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)
+ to learn more about service types.
+ labels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra labels to add to the proxy service.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ to learn more about labels.
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Annotations to apply to the service that is exposing the proxy.
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+ for more details about annotations.
+ nodePorts:
+ type: object
+ additionalProperties: false
+ description: |
+ Object to set NodePorts to expose the service on for http and https.
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport)
+ for more details about NodePorts.
+ properties:
+ http:
+ type: [integer, "null"]
+ description: |
+ The HTTP port the proxy-public service should be exposed on.
+ https:
+ type: [integer, "null"]
+ description: |
+ The HTTPS port the proxy-public service should be exposed on.
+ disableHttpPort:
+ type: boolean
+ description: |
+ Default `false`.
+
+ If `true`, port 80 for incoming HTTP traffic will no longer be exposed. This should not be used with `proxy.https.type=letsencrypt` or `proxy.https.enabled=false` as it would remove the only exposed port.
+ extraPorts:
+ type: array
+ description: |
+ Extra ports the k8s Service should accept incoming traffic on,
+          which will be redirected to either the `autohttps` pod (traefik)
+ or the `proxy` pod (chp).
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#serviceport-v1-core)
+ for the structure of the items in this list.
+ loadBalancerIP:
+ type: [string, "null"]
+ description: |
+ The public IP address the proxy-public Kubernetes service should
+ be exposed on. This entry will end up at the configurable proxy
+ server that JupyterHub manages, which will direct traffic to user
+ pods at the `/user` path and the hub pod at the `/hub` path.
+
+ Set this if you want to use a fixed external IP address instead of
+ a dynamically acquired one. This is relevant if you have a domain
+ name that you want to point to a specific IP and want to ensure it
+ doesn't change.
+ loadBalancerSourceRanges:
+ type: array
+ description: |
+ A list of IP CIDR ranges that are allowed to access the load balancer service.
+ Defaults to allowing everyone to access it.
+ https:
+ type: object
+ additionalProperties: false
+ description: |
+ Object for customizing the settings for HTTPS used by the JupyterHub's proxy.
+ For more information on configuring HTTPS for your JupyterHub, see the [HTTPS section in our security guide](https)
+ properties:
+ enabled:
+ type: [boolean, "null"]
+ description: |
+ Indicator to set whether HTTPS should be enabled or not on the proxy. Defaults to `true` if the https object is provided.
+ type:
+ enum: [null, "", letsencrypt, manual, offload, secret]
+ description: |
+ The type of HTTPS encryption that is used.
+ Decides on which ports and network policies are used for communication via HTTPS. Setting this to `secret` sets the type to manual HTTPS with a secret that has to be provided in the `https.secret` object.
+ Defaults to `letsencrypt`.
+ letsencrypt:
+ type: object
+ additionalProperties: false
+ properties:
+ contactEmail:
+ type: [string, "null"]
+ description: |
+ The contact email to be used for automatically provisioned HTTPS certificates by Let's Encrypt. For more information see [Set up automatic HTTPS](setup-automatic-https).
+ Required for automatic HTTPS.
+ acmeServer:
+ type: [string, "null"]
+ description: |
+ Let's Encrypt is one of various ACME servers that can provide
+ a certificate, and by default their production server is used.
+
+ Let's Encrypt staging: https://acme-staging-v02.api.letsencrypt.org/directory
+ Let's Encrypt production: acmeServer: https://acme-v02.api.letsencrypt.org/directory
+ manual:
+ type: object
+ additionalProperties: false
+ description: |
+ Object for providing own certificates for manual HTTPS configuration. To be provided when setting `https.type` to `manual`.
+ See [Set up manual HTTPS](setup-manual-https)
+ properties:
+ key:
+ type: [string, "null"]
+ description: |
+ The RSA private key to be used for HTTPS.
+ To be provided in the form of
+
+ ```
+ key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ ...
+ -----END RSA PRIVATE KEY-----
+ ```
+ cert:
+ type: [string, "null"]
+ description: |
+ The certificate to be used for HTTPS.
+ To be provided in the form of
+
+ ```
+ cert: |
+ -----BEGIN CERTIFICATE-----
+ ...
+ -----END CERTIFICATE-----
+ ```
+ secret:
+ type: object
+ additionalProperties: false
+ description: |
+ Secret to be provided when setting `https.type` to `secret`.
+ properties:
+ name:
+ type: [string, "null"]
+ description: |
+ Name of the secret
+ key:
+ type: [string, "null"]
+ description: |
+ Path to the private key to be used for HTTPS.
+ Example: `'tls.key'`
+ crt:
+ type: [string, "null"]
+ description: |
+ Path to the certificate to be used for HTTPS.
+ Example: `'tls.crt'`
+ hosts:
+ type: array
+ description: |
+          Your domain in list form.
+ Required for automatic HTTPS. See [Set up automatic HTTPS](setup-automatic-https).
+ To be provided like:
+ ```
+            hosts:
+              - <your-domain-name>
+ ```
+ traefik:
+ type: object
+ additionalProperties: false
+ description: |
+ Configure the traefik proxy used to terminate TLS when 'autohttps' is enabled
+ properties:
+ revisionHistoryLimit: *revisionHistoryLimit
+ labels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra labels to add to the traefik pod.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ to learn more about labels.
+ networkPolicy: *networkPolicy-spec
+ extraInitContainers:
+ type: array
+ description: |
+          list of extraInitContainers to be run in the traefik pod, after the containers set in the chart. See [Kubernetes Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
+
+ ```yaml
+ proxy:
+ traefik:
+ extraInitContainers:
+ - name: init-myservice
+ image: busybox:1.28
+ command: ['sh', '-c', 'command1']
+ - name: init-mydb
+ image: busybox:1.28
+ command: ['sh', '-c', 'command2']
+ ```
+ extraEnv:
+ type: [object, array]
+ additionalProperties: true
+ description: |
+ Extra environment variables that should be set for the traefik pod.
+
+ Environment Variables here may be used to configure traefik.
+
+ String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which
+ is a part of Kubernetes.
+
+ ```yaml
+ proxy:
+ traefik:
+ extraEnv:
+ # basic notation (for literal values only)
+ MY_ENV_VARS_NAME1: "my env var value 1"
+
+ # explicit notation (the "name" field takes precedence)
+ TRAEFIK_NAMESPACE:
+ name: TRAEFIK_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+
+ # implicit notation (the "name" field is implied)
+ PREFIXED_TRAEFIK_NAMESPACE:
+ value: "my-prefix-$(TRAEFIK_NAMESPACE)"
+ SECRET_VALUE:
+ valueFrom:
+ secretKeyRef:
+ name: my-k8s-secret
+ key: password
+ ```
+
+ For more information, see the [Kubernetes EnvVar
+ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
+ pdb: *pdb-spec
+ nodeSelector: *nodeSelector-spec
+ tolerations: *tolerations-spec
+ containerSecurityContext: *containerSecurityContext-spec
+ extraDynamicConfig:
+ type: object
+ additionalProperties: true
+ description: |
+ This refers to traefik's post-startup configuration.
+
+          This Helm chart already provides such configuration, so this is a
+ place where you can merge in additional configuration. If you are
+ about to use this configuration, you may want to inspect the
+ default configuration declared
+ [here](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/HEAD/jupyterhub/templates/proxy/autohttps/_configmap-dynamic.yaml).
+ extraPorts:
+ type: array
+ description: |
+ Extra ports for the traefik container within the autohttps pod
+ that you would like to expose, formatted in a k8s native way.
+ extraStaticConfig:
+ type: object
+ additionalProperties: true
+ description: |
+ This refers to traefik's startup configuration.
+
+          This Helm chart already provides such configuration, so this is a
+ place where you can merge in additional configuration. If you are
+ about to use this configuration, you may want to inspect the
+ default configuration declared
+ [here](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/HEAD/jupyterhub/templates/proxy/autohttps/_configmap-traefik.yaml).
+ extraVolumes: *extraVolumes-spec
+ extraVolumeMounts: *extraVolumeMounts-spec
+ hsts:
+ type: object
+ additionalProperties: false
+ required: [includeSubdomains, maxAge, preload]
+ description: |
+          This section regards an HTTP Strict-Transport-Security (HSTS)
+          response header. It can act as a request for visiting web
+          browsers to enforce HTTPS on their end for a given time into
+          the future, and optionally also for future requests to subdomains.
+
+ These settings relate to traefik configuration which we use as a
+ TLS termination proxy.
+
+ See [Mozilla's
+ documentation](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security)
+ for more information.
+ properties:
+ includeSubdomains:
+ type: boolean
+ maxAge:
+ type: integer
+ preload:
+ type: boolean
+ image: *image-spec
+ resources: *resources-spec
+ serviceAccount: *serviceAccount
+ extraPodSpec: *extraPodSpec-spec
+ labels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ K8s labels for the proxy pod.
+
+ ```{note}
+ For consistency, this should really be located under
+ proxy.chp.labels but isn't for historical reasons.
+ ```
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ K8s annotations for the proxy pod.
+
+ ```{note}
+ For consistency, this should really be located under
+ proxy.chp.annotations but isn't for historical reasons.
+ ```
+ deploymentStrategy:
+ type: object
+ additionalProperties: false
+ properties:
+ rollingUpdate:
+ type: [string, "null"]
+ type:
+ type: [string, "null"]
+ description: |
+ While the proxy pod running
+ [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy)
+ could run in parallel, two instances running in parallel wouldn't
+ both receive updates from JupyterHub regarding how it should route
+ traffic. Due to this we default to using a deployment strategy of
+ Recreate instead of RollingUpdate.
+ secretSync:
+ type: object
+ additionalProperties: false
+ description: |
+ This configuration section refers to configuration of the sidecar
+ container in the autohttps pod running next to its traefik container
+ responsible for TLS termination.
+
+ The purpose of this container is to store away and load TLS
+ certificates from a k8s Secret. The TLS certificates are acquired by
+ the ACME client (LEGO) that is running within the traefik container,
+ where traefik is using them for TLS termination.
+ properties:
+ containerSecurityContext: *containerSecurityContext-spec
+ image: *image-spec
+ resources: *resources-spec
+
+ singleuser:
+ type: object
+ additionalProperties: false
+ description: |
+ Options for customizing the environment that is provided to the users after they log in.
+ properties:
+ networkPolicy: *networkPolicy-spec
+ podNameTemplate:
+ type: [string, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.pod_name_template](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.pod_name_template).
+ cpu:
+ type: object
+ additionalProperties: false
+ description: |
+ Set CPU limits & guarantees that are enforced for each user.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
+ for more info.
+ properties:
+ limit:
+ type: [number, "null"]
+ guarantee:
+ type: [number, "null"]
+ memory:
+ type: object
+ additionalProperties: false
+ description: |
+ Set Memory limits & guarantees that are enforced for each user.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
+ for more info.
+ properties:
+ limit:
+ type: [number, string, "null"]
+ guarantee:
+ type: [number, string, "null"]
+ description: |
+ Note that this field is referred to as *requests* by the Kubernetes API.
+ image: *image-spec
+ initContainers:
+ type: array
+ description: |
+          list of initContainers to be run in every singleuser pod. See [Kubernetes Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
+
+ ```yaml
+ singleuser:
+ initContainers:
+ - name: init-myservice
+ image: busybox:1.28
+ command: ['sh', '-c', 'command1']
+ - name: init-mydb
+ image: busybox:1.28
+ command: ['sh', '-c', 'command2']
+ ```
+ profileList:
+ type: array
+ description: |
+ For more information about the profile list, see [KubeSpawner's
+ documentation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner)
+ as this is simply a passthrough to that configuration.
+
+ ```{note}
+ The image-pullers are aware of the overrides of images in
+ `singleuser.profileList` but they won't be if you configure it in
+            JupyterHub's configuration of `c.KubeSpawner.profile_list`.
+ ```
+
+ ```yaml
+ singleuser:
+ profileList:
+ - display_name: "Default: Shared, 8 CPU cores"
+ description: "Your code will run on a shared machine with CPU only."
+ default: True
+ - display_name: "Personal, 4 CPU cores & 26GB RAM, 1 NVIDIA Tesla K80 GPU"
+ description: "Your code will run a personal machine with a GPU."
+ kubespawner_override:
+ extra_resource_limits:
+ nvidia.com/gpu: "1"
+ ```
+ extraFiles: *extraFiles
+ extraEnv:
+ type: [object, array]
+ additionalProperties: true
+ description: |
+ Extra environment variables that should be set for the user pods.
+
+ String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which
+ is a part of Kubernetes. Note that the user pods will already have
+ access to a set of environment variables that you can use, like
+ `JUPYTERHUB_USER` and `JUPYTERHUB_HOST`. For more information about these
+ inspect [this source
+ code](https://github.com/jupyterhub/jupyterhub/blob/cc8e7806530466dce8968567d1bbd2b39a7afa26/jupyterhub/spawner.py#L763).
+
+ ```yaml
+ singleuser:
+ extraEnv:
+ # basic notation (for literal values only)
+ MY_ENV_VARS_NAME1: "my env var value 1"
+
+ # explicit notation (the "name" field takes precedence)
+ USER_NAMESPACE:
+ name: USER_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+
+ # implicit notation (the "name" field is implied)
+ PREFIXED_USER_NAMESPACE:
+ value: "my-prefix-$(USER_NAMESPACE)"
+ SECRET_VALUE:
+ valueFrom:
+ secretKeyRef:
+ name: my-k8s-secret
+ key: password
+ ```
+
+ For more information, see the [Kubernetes EnvVar
+ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
+ nodeSelector: *nodeSelector-spec
+ extraTolerations: *tolerations-spec
+ extraNodeAffinity:
+ type: object
+ additionalProperties: false
+ description: |
+ Affinities describe where pods prefer or require to be scheduled, they
+ may prefer or require a node where they are to be scheduled to have a
+ certain label (node affinity). They may also require to be scheduled
+ in proximity or with a lack of proximity to another pod (pod affinity
+ and anti pod affinity).
+
+ See the [Kubernetes
+ docs](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)
+ for more info.
+ properties:
+ required:
+ type: array
+ description: |
+ Pass this field an array of
+ [`NodeSelectorTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#nodeselectorterm-v1-core)
+ objects.
+ preferred:
+ type: array
+ description: |
+ Pass this field an array of
+ [`PreferredSchedulingTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#preferredschedulingterm-v1-core)
+ objects.
+ extraPodAffinity:
+ type: object
+ additionalProperties: false
+ description: |
+ See the description of `singleuser.extraNodeAffinity`.
+ properties:
+ required:
+ type: array
+ description: |
+ Pass this field an array of
+ [`PodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podaffinityterm-v1-core)
+ objects.
+ preferred:
+ type: array
+ description: |
+ Pass this field an array of
+ [`WeightedPodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#weightedpodaffinityterm-v1-core)
+ objects.
+ extraPodAntiAffinity:
+ type: object
+ additionalProperties: false
+ description: |
+ See the description of `singleuser.extraNodeAffinity`.
+ properties:
+ required:
+ type: array
+ description: |
+ Pass this field an array of
+ [`PodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podaffinityterm-v1-core)
+ objects.
+ preferred:
+ type: array
+ description: |
+ Pass this field an array of
+ [`WeightedPodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#weightedpodaffinityterm-v1-core)
+ objects.
+ cloudMetadata:
+ type: object
+ additionalProperties: false
+ required: [blockWithIptables, ip]
+ description: |
+ Please refer to dedicated section in [the Helm chart
+ documentation](block-metadata-iptables) for more information about
+ this.
+ properties:
+ blockWithIptables:
+ type: boolean
+ ip:
+ type: string
+
+ cmd:
+ type: [array, string, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.cmd](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.cmd).
+ The default is "jupyterhub-singleuser".
+ Use `cmd: null` to launch a custom CMD from the image,
+ which must launch jupyterhub-singleuser or an equivalent process eventually.
+ For example: Jupyter's docker-stacks images.
+ defaultUrl:
+ type: [string, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.default_url](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.default_url).
+ # FIXME: name mismatch, named events_enabled in kubespawner
+ events:
+ type: [boolean, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.events_enabled](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.events_enabled).
+ extraAnnotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.extra_annotations](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_annotations).
+ extraContainers:
+ type: array
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.extra_containers](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_containers).
+ extraLabels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.extra_labels](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_labels).
+ extraPodConfig:
+ type: object
+ additionalProperties: true
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.extra_pod_config](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_pod_config).
+ extraResource:
+ type: object
+ additionalProperties: false
+ properties:
+ # FIXME: name mismatch, named extra_resource_guarantees in kubespawner
+ guarantees:
+ type: object
+ additionalProperties: true
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.extra_resource_guarantees](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_resource_guarantees).
+ # FIXME: name mismatch, named extra_resource_limits in kubespawner
+ limits:
+ type: object
+ additionalProperties: true
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.extra_resource_limits](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_resource_limits).
+ fsGid:
+ type: [integer, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.fs_gid](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.fs_gid).
+ lifecycleHooks:
+ type: object
+ additionalProperties: false
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.lifecycle_hooks](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.lifecycle_hooks).
+ properties:
+ postStart:
+ type: object
+ additionalProperties: true
+ preStop:
+ type: object
+ additionalProperties: true
+ networkTools:
+ type: object
+ additionalProperties: false
+ description: |
+ This configuration section refers to configuration of a conditionally
+ created initContainer for the user pods with a purpose to block a
+ specific IP address.
+
+ This initContainer will be created if
+ [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)
+ is set to true.
+ properties:
+ image: *image-spec
+ resources: *resources-spec
+ # FIXME: name mismatch, named service_account in kubespawner
+ serviceAccountName:
+ type: [string, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.service_account](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.service_account).
+ startTimeout:
+ type: [integer, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.start_timeout](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.start_timeout).
+ storage:
+ type: object
+ additionalProperties: false
+ required: [type, homeMountPath]
+ description: |
+ This section configures KubeSpawner directly to some extent but also
+ indirectly through Helm chart specific configuration options such as
+ [`singleuser.storage.type`](schema_singleuser.storage.type).
+ properties:
+ capacity:
+ type: [string, "null"]
+ description: |
+ Configures `KubeSpawner.storage_capacity`.
+
+ See the [KubeSpawner
+ documentation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html)
+ for more information.
+ dynamic:
+ type: object
+ additionalProperties: false
+ properties:
+ pvcNameTemplate:
+ type: [string, "null"]
+ description: |
+ Configures `KubeSpawner.pvc_name_template` which will be the
+ resource name of the PVC created by KubeSpawner for each user
+ if needed.
+ storageAccessModes:
+ type: array
+ items:
+ type: [string, "null"]
+ description: |
+ Configures `KubeSpawner.storage_access_modes`.
+
+                See KubeSpawner's documentation and [the k8s
+ documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes)
+ for more information.
+ storageClass:
+ type: [string, "null"]
+ description: |
+ Configures `KubeSpawner.storage_class`, which can be an
+ explicit StorageClass to dynamically provision storage for the
+ PVC that KubeSpawner will create.
+
+                There is often a default StorageClass available in k8s clusters
+ for use if this is unspecified.
+ volumeNameTemplate:
+ type: [string, "null"]
+ description: |
+ Configures `KubeSpawner.volume_name_template`, which is the
+ name to reference from the containers volumeMounts section.
+ extraLabels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Configures `KubeSpawner.storage_extra_labels`. Note that these
+ labels are set on the PVC during creation only and won't be
+ updated after creation.
+ extraVolumeMounts: *extraVolumeMounts-spec
+ extraVolumes: *extraVolumes-spec
+ homeMountPath:
+ type: string
+ description: |
+ The location within the container where the home folder storage
+ should be mounted.
+ static:
+ type: object
+ additionalProperties: false
+ properties:
+ pvcName:
+ type: [string, "null"]
+ description: |
+ Configures `KubeSpawner.pvc_claim_name` to reference
+ pre-existing storage.
+ subPath:
+ type: [string, "null"]
+ description: |
+ Configures the `subPath` field of a
+ `KubeSpawner.volume_mounts` entry added by the Helm chart.
+
+ Path within the volume from which the container's volume
+ should be mounted.
+ type:
+ enum: [dynamic, static, none]
+ description: |
+ Decide if you want storage to be provisioned dynamically
+ (dynamic), or if you want to attach existing storage (static), or
+ don't want any storage to be attached (none).
+ allowPrivilegeEscalation:
+ type: [boolean, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.allow_privilege_escalation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.allow_privilege_escalation).
+ uid:
+ type: [integer, "null"]
+ description: |
+ Passthrough configuration for
+ [KubeSpawner.uid](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.uid).
+
+ This dictates as what user the main container will start up as.
+
+ As an example of when this is needed, consider if you want to enable
+ sudo rights for some of your users. This can be done by starting up as
+ root, enabling it from the container in a startup script, and then
+ transitioning to the normal user.
+
+ Default is 1000, set to null to use the container's default.
+
+ scheduling:
+ type: object
+ additionalProperties: false
+ description: |
+ Objects for customizing the scheduling of various pods on the nodes and
+ related labels.
+ properties:
+ userScheduler:
+ type: object
+ additionalProperties: false
+ required: [enabled, plugins, pluginConfig, logLevel]
+ description: |
+ The user scheduler is making sure that user pods are scheduled
+ tight on nodes, this is useful for autoscaling of user node pools.
+ properties:
+ enabled:
+ type: boolean
+ description: |
+ Enables the user scheduler.
+ revisionHistoryLimit: *revisionHistoryLimit
+ replicas:
+ type: integer
+ description: |
+ You can have multiple schedulers to share the workload or improve
+ availability on node failure.
+ image: *image-spec
+ pdb: *pdb-spec
+ nodeSelector: *nodeSelector-spec
+ tolerations: *tolerations-spec
+ labels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra labels to add to the userScheduler pods.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ to learn more about labels.
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra annotations to add to the user-scheduler pods.
+ containerSecurityContext: *containerSecurityContext-spec
+ logLevel:
+ type: integer
+ description: |
+ Corresponds to the verbosity level of logging made by the
+ kube-scheduler binary running within the user-scheduler pod.
+ plugins:
+ type: object
+ additionalProperties: true
+ description: |
+ These plugins refer to kube-scheduler plugins as documented
+ [here](https://kubernetes.io/docs/reference/scheduling/config/).
+
+ The user-scheduler is really just a kube-scheduler configured in a
+ way to pack users tight on nodes using these plugins. See
+ values.yaml for information about the default plugins.
+ pluginConfig:
+ type: array
+ description: |
+ Individually activated plugins can be configured further.
+ resources: *resources-spec
+ serviceAccount: *serviceAccount
+ extraPodSpec: *extraPodSpec-spec
+ podPriority:
+ type: object
+ additionalProperties: false
+ description: |
+ Pod Priority is used to allow real users to evict user placeholder pods
+ that in turn by entering a Pending state can trigger a scale up by a
+ cluster autoscaler.
+
+ Having this option enabled only makes sense if the following conditions
+ are met:
+
+ 1. A cluster autoscaler is installed.
+ 2. user-placeholder pods are configured to have a priority equal or
+ higher than the cluster autoscaler's "priority cutoff" so that the
+ cluster autoscaler scales up a node in advance for a pending user
+ placeholder pod.
+ 3. Normal user pods have a higher priority than the user-placeholder
+ pods.
+ 4. Image puller pods have a priority between normal user pods and
+ user-placeholder pods.
+
+ Note that if the default priority cutoff is not configured on cluster
+ autoscaler, it will currently default to 0, and that in the future
+ this is meant to be lowered. If your cloud provider is installing the
+ cluster autoscaler for you, they may also configure this specifically.
+
+ Recommended settings for a cluster autoscaler...
+
+ ... with a priority cutoff of -10 (GKE):
+
+ ```yaml
+ podPriority:
+ enabled: true
+ globalDefault: false
+ defaultPriority: 0
+ imagePullerPriority: -5
+ userPlaceholderPriority: -10
+ ```
+
+ ... with a priority cutoff of 0:
+
+ ```yaml
+ podPriority:
+ enabled: true
+ globalDefault: true
+ defaultPriority: 10
+ imagePullerPriority: 5
+ userPlaceholderPriority: 0
+ ```
+ properties:
+ enabled:
+ type: boolean
+ globalDefault:
+ type: boolean
+ description: |
+ Warning! This will influence all pods in the cluster.
+
+ The priority a pod usually get is 0. But this can be overridden
+ with a PriorityClass resource if it is declared to be the global
+ default. This configuration option allows for the creation of such
+ global default.
+ defaultPriority:
+ type: integer
+ description: |
+ The actual value for the default pod priority.
+ imagePullerPriority:
+ type: integer
+ description: |
+ The actual value for the [hook|continuous]-image-puller pods' priority.
+ userPlaceholderPriority:
+ type: integer
+ description: |
+ The actual value for the user-placeholder pods' priority.
+ userPlaceholder:
+ type: object
+ additionalProperties: false
+ description: |
+ User placeholders simulate users but will, thanks to PodPriority, be
+ evicted by the cluster autoscaler if a real user shows up. In this way
+ placeholders allow you to create a headroom for the real users and
+ reduce the risk of a user having to wait for a node to be added. Be
+ sure to use the continuous image puller as well along with
+ placeholders, so the images are also available when real users arrive.
+
+ To test your setup efficiently, you can adjust the amount of user
+ placeholders with the following command:
+ ```sh
+ # Configure to have 3 user placeholders
+ kubectl scale sts/user-placeholder --replicas=3
+ ```
+ properties:
+ enabled:
+ type: boolean
+ image: *image-spec
+ revisionHistoryLimit: *revisionHistoryLimit
+ replicas:
+ type: integer
+ description: |
+ How many placeholder pods would you like to have?
+ labels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra labels to add to the userPlaceholder pods.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ to learn more about labels.
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra annotations to add to the placeholder pods.
+ resources:
+ type: object
+ additionalProperties: true
+ description: |
+ Unless specified here, the placeholder pods will request the same
+ resources specified for the real singleuser pods.
+ containerSecurityContext: *containerSecurityContext-spec
+ corePods:
+ type: object
+ additionalProperties: false
+ description: |
+ These settings influence the core pods like the hub, proxy, and
+ user-scheduler pods.
+ They influence all pods considered core pods, namely:
+
+ - hub
+ - proxy
+ - autohttps
+ - hook-image-awaiter
+ - user-scheduler
+
+ By default, the tolerations are:
+
+ - hub.jupyter.org/dedicated=core:NoSchedule
+ - hub.jupyter.org_dedicated=core:NoSchedule
+
+ Note that tolerations set here are combined with the respective
+ components' dedicated tolerations, and that `_` is available in case
+ `/` isn't allowed in the cloud's tolerations.
+ properties:
+ tolerations: *tolerations-spec
+ nodeAffinity:
+ type: object
+ additionalProperties: false
+ description: |
+ Where should pods be scheduled? Perhaps on nodes with a certain
+ label is preferred or even required?
+ properties:
+ matchNodePurpose:
+ enum: [ignore, prefer, require]
+ description: |
+ Decide if core pods *ignore*, *prefer* or *require* to
+ schedule on nodes with this label:
+ ```
+ hub.jupyter.org/node-purpose=core
+ ```
+ userPods:
+ type: object
+ additionalProperties: false
+ description: |
+ These settings influence all pods considered user pods, namely:
+
+ - user-placeholder
+ - hook-image-puller
+ - continuous-image-puller
+ - jupyter-
+
+ By default, the tolerations are:
+
+ - hub.jupyter.org/dedicated=user:NoSchedule
+ - hub.jupyter.org_dedicated=user:NoSchedule
+
+ Note that tolerations set here are combined with the respective
+ components' dedicated tolerations, and that `_` is available in case
+ `/` isn't allowed in the cloud's tolerations.
+ properties:
+ tolerations: *tolerations-spec
+ nodeAffinity:
+ type: object
+ additionalProperties: false
+ description: |
+ Where should pods be scheduled? Perhaps on nodes with a certain
+ label is preferred or even required?
+ properties:
+ matchNodePurpose:
+ enum: [ignore, prefer, require]
+ description: |
+ Decide if user pods *ignore*, *prefer* or *require* to
+ schedule on nodes with this label:
+ ```
+ hub.jupyter.org/node-purpose=user
+ ```
+
+ ingress:
+ type: object
+ additionalProperties: false
+ required: [enabled]
+ properties:
+ enabled:
+ type: boolean
+ description: |
+ Enable the creation of a Kubernetes Ingress to proxy-public service.
+
+ See [Advanced Topics — Zero to JupyterHub with Kubernetes
+ 0.7.0 documentation](ingress)
+ for more details.
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Annotations to apply to the Ingress resource.
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+ for more details about annotations.
+ ingressClassName:
+ type: [string, "null"]
+ description: |
+ Maps directly to the Ingress resource's `spec.ingressClassName`.
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class)
+ for more details.
+ hosts:
+ type: array
+ description: |
+ List of hosts to route requests to the proxy.
+ pathSuffix:
+ type: [string, "null"]
+ description: |
+ Suffix added to Ingress's routing path pattern.
+
+ Specify `*` if your ingress matches path by glob pattern.
+ pathType:
+ enum: [Prefix, Exact, ImplementationSpecific]
+ description: |
+ The path type to use. The default value is 'Prefix'.
+
+ See [the Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types)
+ for more details about path types.
+ tls:
+ type: array
+ description: |
+ TLS configurations for Ingress.
+
+ See [the Kubernetes
+ documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls)
+ for more details about TLS configuration.
+
+ prePuller:
+ type: object
+ additionalProperties: false
+ required: [hook, continuous]
+ properties:
+ revisionHistoryLimit: *revisionHistoryLimit
+ labels:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Extra labels to add to the pre puller job pods.
+
+ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ to learn more about labels.
+ annotations:
+ type: object
+ additionalProperties: false
+ patternProperties: *labels-and-annotations-patternProperties
+ description: |
+ Annotations to apply to the hook and continuous image puller pods. One example use case is to
+ disable istio sidecars which could interfere with the image pulling.
+ resources:
+ type: object
+ additionalProperties: true
+ description: |
+ These are standard Kubernetes resources with requests and limits for
+ cpu and memory. They will be used on the containers in the pods
+ pulling images. These should be set extremely low as the containers
+ either shut down directly or just run an idle pause container.
+
+ They were made configurable as usage of ResourceQuota may require
+ containers in the namespace to have explicit resources set.
+ extraTolerations: *tolerations-spec
+ hook:
+ type: object
+ additionalProperties: false
+ required: [enabled]
+ description: |
+ See the [*optimization
+ section*](pulling-images-before-users-arrive)
+ for more details.
+ properties:
+ enabled:
+ type: boolean
+ pullOnlyOnChanges:
+ type: boolean
+ description: |
+ Pull only if changes have been made to the images to pull, or more
+ accurately if the hook-image-puller daemonset has changed in any
+ way.
+ podSchedulingWaitDuration:
+ description: |
+ The `hook-image-awaiter` has a criteria to await all the
+ `hook-image-puller` DaemonSet's pods to both schedule and finish
+ their image pulling. This flag can be used to relax this criteria
+ to instead only await the pods that _have already scheduled_ to
+ finish image pulling after a certain duration.
+
+ The value of this is that sometimes the newly created
+ `hook-image-puller` pods cannot be scheduled because nodes are
+ full, and then it probably won't make sense to block a `helm
+ upgrade`.
+
+ An infinite duration to wait for pods to schedule can be
+ represented by `-1`. This was the default behavior of version
+ 0.9.0 and earlier.
+ type: integer
+ nodeSelector: *nodeSelector-spec
+ tolerations: *tolerations-spec
+ containerSecurityContext: *containerSecurityContext-spec
+ image: *image-spec
+ resources: *resources-spec
+ serviceAccount: *serviceAccount
+ continuous:
+ type: object
+ additionalProperties: false
+ required: [enabled]
+ description: |
+ See the [*optimization
+ section*](pulling-images-before-users-arrive)
+ for more details.
+
+ ```{note}
+ If used with a Cluster Autoscaler (an autoscaling node pool), also add
+ user-placeholders and enable pod priority.
+ ```
+ properties:
+ enabled:
+ type: boolean
+ pullProfileListImages:
+ type: boolean
+ description: |
+ The singleuser.profileList configuration can provide a selection of
+ images. This option determines if all images identified there should
+ be pulled, both by the hook and continuous pullers.
+
+ Images are looked for under `kubespawner_override`, and also
+ `profile_options.choices.kubespawner_override` since version 3.2.0.
+
+ The reason to disable this, is that if you have for example 10 images
+ which start pulling in order from 1 to 10, a user that arrives and
+ wants to start a pod with image number 10 will need to wait for all
+ images to be pulled, and then it may be preferable to just let the
+ user arriving wait for a single image to be pulled on arrival.
+ extraImages:
+ type: object
+ additionalProperties: false
+ description: |
+ See the [*optimization section*](images-that-will-be-pulled) for more
+ details.
+
+ ```yaml
+ prePuller:
+ extraImages:
+ my-extra-image-i-want-pulled:
+ name: jupyter/all-spark-notebook
+ tag: 2343e33dec46
+ ```
+ patternProperties:
+ ".*":
+ type: object
+ additionalProperties: false
+ required: [name, tag]
+ properties:
+ name:
+ type: string
+ tag:
+ type: string
+ containerSecurityContext: *containerSecurityContext-spec
+ pause:
+ type: object
+ additionalProperties: false
+ description: |
+ The image-puller pods rely on initContainer to pull all images, and
+ their actual container when they are done is just running a `pause`
+ container. These are settings for that pause container.
+ properties:
+ containerSecurityContext: *containerSecurityContext-spec
+ image: *image-spec
+
+ custom:
+ type: object
+ additionalProperties: true
+ description: |
+ Additional values to pass to the Hub.
+ JupyterHub will not itself look at these,
+ but you can read values in your own custom config via `hub.extraConfig`.
+ For example:
+
+ ```yaml
+ custom:
+ myHost: "https://example.horse"
+ hub:
+ extraConfig:
+ myConfig.py: |
+ c.MyAuthenticator.host = get_config("custom.myHost")
+ ```
+
+ cull:
+ type: object
+ additionalProperties: false
+ required: [enabled]
+ description: |
+ The
+ [jupyterhub-idle-culler](https://github.com/jupyterhub/jupyterhub-idle-culler)
+ can run as a JupyterHub managed service to _cull_ running servers.
+ properties:
+ enabled:
+ type: boolean
+ description: |
+ Enable/disable use of jupyter-idle-culler.
+ users:
+ type: [boolean, "null"]
+ description: See the `--cull-users` flag.
+ adminUsers:
+ type: [boolean, "null"]
+ description: See the `--cull-admin-users` flag.
+ removeNamedServers:
+ type: [boolean, "null"]
+ description: See the `--remove-named-servers` flag.
+ timeout:
+ type: [integer, "null"]
+ description: See the `--timeout` flag.
+ every:
+ type: [integer, "null"]
+ description: See the `--cull-every` flag.
+ concurrency:
+ type: [integer, "null"]
+ description: See the `--concurrency` flag.
+ maxAge:
+ type: [integer, "null"]
+ description: See the `--max-age` flag.
+
+ debug:
+ type: object
+ additionalProperties: false
+ required: [enabled]
+ properties:
+ enabled:
+ type: boolean
+ description: |
+ Increases the loglevel throughout the resources in the Helm chart.
+
+ rbac:
+ type: object
+ additionalProperties: false
+ required: [create]
+ properties:
+ enabled:
+ type: boolean
+ # This schema entry is needed to help us print a more helpful error
+ # message in NOTES.txt if hub.fsGid is set.
+ #
+ description: |
+ ````{note}
+ Removed in version 2.0.0. If you have been using `rbac.enable=false`
+ (strongly discouraged), then the equivalent configuration would be:
+
+ ```yaml
+ rbac:
+ create: false
+ hub:
+ serviceAccount:
+ create: false
+ proxy:
+ traefik:
+ serviceAccount:
+ create: false
+ scheduling:
+ userScheduler:
+ serviceAccount:
+ create: false
+ prePuller:
+ hook:
+ serviceAccount:
+ create: false
+ ```
+ ````
+ create:
+ type: boolean
+ description: |
+ Decides if (Cluster)Role and (Cluster)RoleBinding resources are
+ created and bound to the configured serviceAccounts.
+
+ global:
+ type: object
+ additionalProperties: true
+ properties:
+ safeToShowValues:
+ type: boolean
+ description: |
+ A flag that should only be set to true temporarily when experiencing a
+ deprecation message that contain censored content that you wish to
+ reveal.
diff --git a/applications/jupyterhub/deploy/values.yaml b/applications/jupyterhub/deploy/values.yaml
index 2f5cbca3c..5acc79282 100755
--- a/applications/jupyterhub/deploy/values.yaml
+++ b/applications/jupyterhub/deploy/values.yaml
@@ -1,4 +1,4 @@
-harness:
+harness: # EDIT: CLOUDHARNESS
subdomain: hub
service:
auto: false
@@ -12,6 +12,7 @@ harness:
- accounts
build:
- cloudharness-base
+ prepull: [] # additional images to add to the prepuller
quotas:
# sets the maximum number of (included named) servers open concurrently (int)
quota-ws-open: 3
@@ -25,12 +26,24 @@ harness:
quota-ws-maxmem: 0.5
# sets the storage dedicated to the user data in Gb units (float)
quota-storage-max: 1.25
+ test:
+ e2e:
+ enabled: true
+ smoketest: true
+ ignoreRequestErrors: false
+ ignoreConsoleErrors: false
+
# fullnameOverride and nameOverride distinguishes blank strings, null values,
# and non-blank strings. For more details, see the configuration reference.
fullnameOverride: ""
nameOverride:
+# enabled is ignored by the jupyterhub chart itself, but a chart depending on
+# the jupyterhub chart conditionally can make use this config option as the
+# condition.
+enabled:
+
# custom can contain anything you want to pass to the hub pod, as all passed
# Helm template values will be made available there.
custom: {}
@@ -54,10 +67,11 @@ imagePullSecrets: []
# ConfigurableHTTPProxy speaks with the actual ConfigurableHTTPProxy server in
# the proxy pod.
hub:
+ revisionHistoryLimit:
config:
JupyterHub:
admin_access: true
- authenticator_class: keycloak
+ authenticator_class: keycloak # EDIT: CLOUDHARNESS
service:
type: ClusterIP
annotations: {}
@@ -68,7 +82,6 @@ hub:
baseUrl: /
cookieSecret:
initContainers: []
- fsGid: 1000
nodeSelector: {}
tolerations: []
concurrentSpawnLimit: 64
@@ -106,37 +119,38 @@ hub:
extraVolumes: []
extraVolumeMounts: []
image:
- name: jupyterhub/k8s-hub
- tag: "1.1.3"
+ name: quay.io/jupyterhub/k8s-hub
+ tag: "3.2.1"
pullPolicy:
pullSecrets: []
resources: {}
+ podSecurityContext:
+ fsGroup: 1000
containerSecurityContext:
runAsUser: 1000
runAsGroup: 1000
allowPrivilegeEscalation: false
lifecycle: {}
+ loadRoles: {}
services: {}
pdb:
enabled: false
maxUnavailable:
minAvailable: 1
networkPolicy:
- enabled: false
+ enabled: false # EDIT: CLOUDHARNESS -- cannot connect to accounts otherwise
ingress: []
- ## egress for JupyterHub already includes Kubernetes internal DNS and
- ## access to the proxy, but can be restricted further, but ensure to allow
- ## access to the Kubernetes API server that couldn't be pinned ahead of
- ## time.
- ##
- ## ref: https://stackoverflow.com/a/59016417/2220152
- egress:
- - to:
- - ipBlock:
- cidr: 0.0.0.0/0
+ egress: []
+ egressAllowRules:
+ cloudMetadataServer: true
+ dnsPortsCloudMetadataServer: true
+ dnsPortsKubeSystemNamespace: true
+ dnsPortsPrivateIPs: true
+ nonPrivateIPs: true
+ privateIPs: true
interNamespaceAccessLabels: ignore
allowedIngressPorts: []
- allowNamedServers: true
+ allowNamedServers: true # EDIT: CLOUDHARNESS
namedServerLimitPerUser:
authenticatePrometheus:
redirectToServer:
@@ -163,11 +177,13 @@ hub:
timeoutSeconds: 1
existingSecret:
serviceAccount:
+ create: true
+ name:
annotations: {}
extraPodSpec: {}
rbac:
- enabled: true
+ create: true
# proxy relates to the proxy pod, the proxy-public service, and the autohttps
# pod and proxy-http service.
@@ -202,7 +218,7 @@ proxy:
rollingUpdate:
# service relates to the proxy-public service
service:
- type: NodePort
+ type: NodePort # EDIT: CLOUDHARNESS
labels: {}
annotations: {}
nodePorts:
@@ -215,13 +231,17 @@ proxy:
# chp relates to the proxy pod, which is responsible for routing traffic based
# on dynamic configuration sent from JupyterHub to CHP's REST API.
chp:
+ revisionHistoryLimit:
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
image:
- name: jupyterhub/configurable-http-proxy
- tag: 4.5.0 # https://github.com/jupyterhub/configurable-http-proxy/releases
+ name: quay.io/jupyterhub/configurable-http-proxy
+ # tag is automatically bumped to new patch versions by the
+ # watch-dependencies.yaml workflow.
+ #
+ tag: "4.6.1" # https://github.com/jupyterhub/configurable-http-proxy/tags
pullPolicy:
pullSecrets: []
extraCommandLineFlags: []
@@ -229,11 +249,14 @@ proxy:
enabled: true
initialDelaySeconds: 60
periodSeconds: 10
+ failureThreshold: 30
+ timeoutSeconds: 3
readinessProbe:
enabled: true
initialDelaySeconds: 0
periodSeconds: 2
failureThreshold: 1000
+ timeoutSeconds: 1
resources: {}
defaultTarget:
errorTarget:
@@ -241,12 +264,16 @@ proxy:
nodeSelector: {}
tolerations: []
networkPolicy:
- enabled: false
+ enabled: true
ingress: []
- egress:
- - to:
- - ipBlock:
- cidr: 0.0.0.0/0
+ egress: []
+ egressAllowRules:
+ cloudMetadataServer: true
+ dnsPortsCloudMetadataServer: true
+ dnsPortsKubeSystemNamespace: true
+ dnsPortsPrivateIPs: true
+ nonPrivateIPs: true
+ privateIPs: true
interNamespaceAccessLabels: ignore
allowedIngressPorts: [http, https]
pdb:
@@ -257,13 +284,17 @@ proxy:
# traefik relates to the autohttps pod, which is responsible for TLS
# termination when proxy.https.type=letsencrypt.
traefik:
+ revisionHistoryLimit:
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
image:
name: traefik
- tag: v2.4.11 # ref: https://hub.docker.com/_/traefik?tab=tags
+ # tag is automatically bumped to new patch versions by the
+ # watch-dependencies.yaml workflow.
+ #
+ tag: "v2.10.7" # ref: https://hub.docker.com/_/traefik?tab=tags
pullPolicy:
pullSecrets: []
hsts:
@@ -272,6 +303,7 @@ proxy:
maxAge: 15724800 # About 6 months
resources: {}
labels: {}
+ extraInitContainers: []
extraEnv: {}
extraVolumes: []
extraVolumeMounts: []
@@ -283,10 +315,14 @@ proxy:
networkPolicy:
enabled: true
ingress: []
- egress:
- - to:
- - ipBlock:
- cidr: 0.0.0.0/0
+ egress: []
+ egressAllowRules:
+ cloudMetadataServer: true
+ dnsPortsCloudMetadataServer: true
+ dnsPortsKubeSystemNamespace: true
+ dnsPortsPrivateIPs: true
+ nonPrivateIPs: true
+ privateIPs: true
interNamespaceAccessLabels: ignore
allowedIngressPorts: [http, https]
pdb:
@@ -294,6 +330,8 @@ proxy:
maxUnavailable:
minAvailable: 1
serviceAccount:
+ create: true
+ name:
annotations: {}
extraPodSpec: {}
secretSync:
@@ -302,8 +340,8 @@ proxy:
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
image:
- name: jupyterhub/k8s-secret-sync
- tag: "1.1.3"
+ name: quay.io/jupyterhub/k8s-secret-sync
+ tag: "3.2.1"
pullPolicy:
pullSecrets: []
resources: {}
@@ -342,29 +380,27 @@ singleuser:
preferred: []
networkTools:
image:
- name: jupyterhub/k8s-network-tools
- tag: "1.1.3"
+ name: quay.io/jupyterhub/k8s-network-tools
+ tag: "3.2.1"
pullPolicy:
pullSecrets: []
+ resources: {}
cloudMetadata:
# block set to true will append a privileged initContainer using the
# iptables to block the sensitive metadata server at the provided ip.
- blockWithIptables: false
+ blockWithIptables: true
+ ip: 169.254.169.254
networkPolicy:
- enabled: false
+ enabled: true
ingress: []
- egress:
- # Required egress to communicate with the hub and DNS servers will be
- # augmented to these egress rules.
- #
- # This default rule explicitly allows all outbound traffic from singleuser
- # pods, except to a typical IP used to return metadata that can be used by
- # someone with malicious intent.
- - to:
- - ipBlock:
- cidr: 0.0.0.0/0
- except:
- - 169.254.169.254/32
+ egress: []
+ egressAllowRules:
+ cloudMetadataServer: false
+ dnsPortsCloudMetadataServer: true
+ dnsPortsKubeSystemNamespace: true
+ dnsPortsPrivateIPs: true
+ nonPrivateIPs: true
+ privateIPs: false
interNamespaceAccessLabels: ignore
allowedIngressPorts: []
events: true
@@ -376,6 +412,7 @@ singleuser:
lifecycleHooks: {}
initContainers: []
extraContainers: []
+ allowPrivilegeEscalation: false
uid: 1000
fsGid: 100
serviceAccountName:
@@ -387,29 +424,29 @@ singleuser:
static:
pvcName:
subPath: "{username}"
- capacity: 10Mi
- homeMountPath: /home/workspace
+ capacity: 10Mi # EDIT: CLOUDHARNESS
+ homeMountPath: /home/workspace # EDIT: CLOUDHARNESS
dynamic:
storageClass:
- pvcNameTemplate: jupyter-{username}
- volumeNameTemplate: jupyter-{username}
+ pvcNameTemplate: jupyter-{username} # EDIT: CLOUDHARNESS
+ volumeNameTemplate: jupyter-{username} # EDIT: CLOUDHARNESS
storageAccessModes: [ReadWriteOnce]
image:
- name: jupyter/base-notebook
- tag: "hub-1.4.2"
+ name: quay.io/jupyterhub/k8s-singleuser-sample
+ tag: "3.2.1"
pullPolicy:
pullSecrets: []
startTimeout: 300
cpu:
- limit: 0.4
- guarantee: 0.05
+ limit: 0.4 # EDIT: CLOUDHARNESS
+ guarantee: 0.05 # EDIT: CLOUDHARNESS
memory:
- limit: 0.5G
- guarantee: 0.1G
+ limit: 0.5G # EDIT: CLOUDHARNESS
+ guarantee: 0.1G # EDIT: CLOUDHARNESS
extraResource:
limits: {}
guarantees: {}
- cmd: /usr/local/bin/start-singleuser.sh
+ cmd: jupyterhub-singleuser
defaultUrl:
extraPodConfig: {}
profileList: []
@@ -417,74 +454,146 @@ singleuser:
# scheduling relates to the user-scheduler pods and user-placeholder pods.
scheduling:
userScheduler:
- enabled: false
+ enabled: false # EDIT: CLOUDHARNESS
+ revisionHistoryLimit:
replicas: 2
logLevel: 4
+ # plugins are configured on the user-scheduler to make us score how we
+ # schedule user pods in a way to help us schedule on the most busy node. By
+ # doing this, we help scale down more effectively. It isn't obvious how to
+ # enable/disable scoring plugins, and configure them, to accomplish this.
+ #
# plugins ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins-1
+ # migration ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduler-configuration-migrations
+ #
plugins:
score:
+ # These scoring plugins are enabled by default according to
+ # https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins
+ # 2022-02-22.
+ #
+ # Enabled with high priority:
+ # - NodeAffinity
+ # - InterPodAffinity
+ # - NodeResourcesFit
+ # - ImageLocality
+ # Remains enabled with low default priority:
+ # - TaintToleration
+ # - PodTopologySpread
+ # - VolumeBinding
+ # Disabled for scoring:
+ # - NodeResourcesBalancedAllocation
+ #
disabled:
- - name: SelectorSpread
- - name: TaintToleration
- - name: PodTopologySpread
+ # We disable these plugins (with regards to scoring) to not interfere
+ # or complicate our use of NodeResourcesFit.
- name: NodeResourcesBalancedAllocation
- - name: NodeResourcesLeastAllocated
# Disable plugins to be allowed to enable them again with a different
# weight and avoid an error.
- - name: NodePreferAvoidPods
- name: NodeAffinity
- name: InterPodAffinity
+ - name: NodeResourcesFit
- name: ImageLocality
enabled:
- - name: NodePreferAvoidPods
- weight: 161051
- name: NodeAffinity
weight: 14631
- name: InterPodAffinity
weight: 1331
- - name: NodeResourcesMostAllocated
+ - name: NodeResourcesFit
weight: 121
- name: ImageLocality
weight: 11
+ pluginConfig:
+ # Here we declare that we should optimize pods to fit based on a
+ # MostAllocated strategy instead of the default LeastAllocated.
+ - name: NodeResourcesFit
+ args:
+ scoringStrategy:
+ resources:
+ - name: cpu
+ weight: 1
+ - name: memory
+ weight: 1
+ type: MostAllocated
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
image:
# IMPORTANT: Bumping the minor version of this binary should go hand in
- # hand with an inspection of the user-scheduelrs RBAC resources
- # that we have forked.
- name: k8s.gcr.io/kube-scheduler
- tag: v1.19.13 # ref: https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md
+ # hand with an inspection of the user-scheduler's RBAC
+ # resources that we have forked in
+ # templates/scheduling/user-scheduler/rbac.yaml.
+ #
+ # Debugging advice:
+ #
+ # - Is configuration of kube-scheduler broken in
+ # templates/scheduling/user-scheduler/configmap.yaml?
+ #
+ # - Is the kube-scheduler binary's compatibility to work
+ # against a k8s api-server that is too new or too old?
+ #
+ # - You can update the GitHub workflow that runs tests to
+ # include "deploy/user-scheduler" in the k8s namespace report
+ # and reduce the user-scheduler deployments replicas to 1 in
+ # dev-config.yaml to get relevant logs from the user-scheduler
+ # pods. Inspect the "Kubernetes namespace report" action!
+ #
+ # - Typical failures are that kube-scheduler fails to search for
+ # resources via its "informers", and won't start trying to
+ # schedule pods before they succeed which may require
+ # additional RBAC permissions or that the k8s api-server is
+ # aware of the resources.
+ #
+ # - If "successfully acquired lease" can be seen in the logs, it
+ # is a good sign kube-scheduler is ready to schedule pods.
+ #
+ name: registry.k8s.io/kube-scheduler
+ # tag is automatically bumped to new patch versions by the
+ # watch-dependencies.yaml workflow. The minor version is pinned in the
+ # workflow, and should be updated there if a minor version bump is done
+ # here. We aim to stay around 1 minor version behind the latest k8s
+ # version.
+ #
+ tag: "v1.28.6" # ref: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
pullPolicy:
pullSecrets: []
nodeSelector: {}
tolerations: []
+ labels: {}
+ annotations: {}
pdb:
enabled: true
maxUnavailable: 1
minAvailable:
resources: {}
serviceAccount:
+ create: true
+ name:
annotations: {}
extraPodSpec: {}
podPriority:
enabled: false
globalDefault: false
defaultPriority: 0
+ imagePullerPriority: -5
userPlaceholderPriority: -10
userPlaceholder:
enabled: true
image:
- name: k8s.gcr.io/pause
- # tag's can be updated by inspecting the output of the command:
- # gcloud container images list-tags k8s.gcr.io/pause --sort-by=~tags
+ name: registry.k8s.io/pause
+ # tag is automatically bumped to new patch versions by the
+ # watch-dependencies.yaml workflow.
#
# If you update this, also update prePuller.pause.image.tag
- tag: "3.5"
+ #
+ tag: "3.9"
pullPolicy:
pullSecrets: []
+ revisionHistoryLimit:
replicas: 0
+ labels: {}
+ annotations: {}
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
@@ -517,6 +626,8 @@ scheduling:
# prePuller relates to the hook|continuous-image-puller DaemonsSets
prePuller:
+ revisionHistoryLimit:
+ labels: {}
annotations: {}
resources: {}
containerSecurityContext:
@@ -530,8 +641,8 @@ prePuller:
pullOnlyOnChanges: true
# image and the configuration below relates to the hook-image-awaiter Job
image:
- name: jupyterhub/k8s-image-awaiter
- tag: "1.1.3"
+ name: quay.io/jupyterhub/k8s-image-awaiter
+ tag: "3.2.1"
pullPolicy:
pullSecrets: []
containerSecurityContext:
@@ -543,6 +654,8 @@ prePuller:
tolerations: []
resources: {}
serviceAccount:
+ create: true
+ name:
annotations: {}
continuous:
enabled: true
@@ -554,18 +667,20 @@ prePuller:
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
image:
- name: k8s.gcr.io/pause
- # tag's can be updated by inspecting the output of the command:
- # gcloud container images list-tags k8s.gcr.io/pause --sort-by=~tags
+ name: registry.k8s.io/pause
+ # tag is automatically bumped to new patch versions by the
+ # watch-dependencies.yaml workflow.
#
# If you update this, also update scheduling.userPlaceholder.image.tag
- tag: "3.5"
+ #
+ tag: "3.9"
pullPolicy:
pullSecrets: []
ingress:
enabled: false
annotations: {}
+ ingressClassName:
hosts: []
pathSuffix:
pathType: Prefix
@@ -581,7 +696,8 @@ ingress:
cull:
enabled: true
users: false # --cull-users
- removeNamedServers: true # --remove-named-servers
+ adminUsers: true # --cull-admin-users
+ removeNamedServers: true # EDIT: CLOUDHARNESS
timeout: 3600 # --timeout
every: 600 # --cull-every
concurrency: 10 # --concurrency
diff --git a/applications/jupyterhub/src/chauthenticator/chauthenticator/auth.py b/applications/jupyterhub/src/chauthenticator/chauthenticator/auth.py
index e7b4cb0dc..ba4489fce 100644
--- a/applications/jupyterhub/src/chauthenticator/chauthenticator/auth.py
+++ b/applications/jupyterhub/src/chauthenticator/chauthenticator/auth.py
@@ -13,11 +13,13 @@
handler.setLevel(logging.DEBUG)
logging.getLogger().addHandler(handler)
+
class CloudHarnessAuthenticateHandler(BaseHandler):
"""
Handler for /chkclogin
Creates a new user based on the keycloak user, and auto starts their server
"""
+
def initialize(self, force_new_server, process_user):
super().initialize()
self.force_new_server = force_new_server
@@ -28,23 +30,23 @@ def get(self):
self.clear_login_cookie()
try:
-
- accessToken = self.request.cookies.get(
- 'kc-access', None) or self.request.cookies.get('accessToken', None)
- print("Token", accessToken)
- if accessToken == '-1' or not accessToken:
- self.redirect('/hub/logout')
-
- accessToken = accessToken.value
- user_data = AuthClient.decode_token(accessToken)
- username = user_data['sub']
- print("Username", username, "-",user_data['preferred_username'])
- raw_user = self.user_from_username(username)
- print("JH user: ", raw_user.__dict__)
- self.set_login_cookie(raw_user)
+
+ accessToken = self.request.cookies.get(
+ 'kc-access', None) or self.request.cookies.get('accessToken', None)
+ print("Token", accessToken)
+ if accessToken == '-1' or not accessToken:
+ self.redirect('/hub/logout')
+
+ accessToken = accessToken.value
+ user_data = AuthClient.decode_token(accessToken)
+ username = user_data['sub']
+ print("Username", username, "-", user_data['preferred_username'])
+ raw_user = self.user_from_username(username)
+ print("JH user: ", raw_user.__dict__)
+ self.set_login_cookie(raw_user)
except Exception as e:
- logging.error("Error getting user from session", exc_info=True)
- raise
+ logging.error("Error getting user from session", exc_info=True)
+ raise
user = yield gen.maybe_future(self.process_user(raw_user, self))
self.redirect(self.get_next_url(user))
diff --git a/applications/jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py b/applications/jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py
index 220883a8d..3c9679e34 100644
--- a/applications/jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py
+++ b/applications/jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py
@@ -18,6 +18,7 @@
def custom_options_form(spawner, abc):
# let's skip the profile selection form for now
# ToDo: for future we can remove this hook
+ spawner._ch_profile_list = spawner.profile_list
spawner.profile_list = []
# ref: https://github.com/jupyterhub/kubespawner/blob/37a80abb0a6c826e5c118a068fa1cf2725738038/kubespawner/spawner.py#L1885-L1935
return spawner._options_form_default()
@@ -115,7 +116,7 @@ def change_pod_manifest(self: KubeSpawner):
quota_ws_open = user_quotas.get("quota-ws-open")
# Default value, might be overwritten by the app config
- self.storage_pvc_ensure = bool(self.pvc_name)
+ self.storage_pvc_ensure = bool(self.pvc_name)
if quota_ws_open:
# get user number of pods running
@@ -123,11 +124,11 @@ def change_pod_manifest(self: KubeSpawner):
num_of_pods = len([s for s in servers if s.active])
if num_of_pods > int(quota_ws_open):
raise PodSpawnException(
- "You reached your quota of {} concurrent servers."
- " One must be deleted before a new server can be started".format(
- quota_ws_open
- ),
- )
+ "You reached your quota of {} concurrent servers."
+ " One must be deleted before a new server can be started".format(
+ quota_ws_open
+ ),
+ )
try:
subdomain = self.handler.request.host.split(
str(self.config['domain']))[0][0:-1]
@@ -139,7 +140,7 @@ def change_pod_manifest(self: KubeSpawner):
if 'subdomain' in harness and harness['subdomain'] == subdomain:
ws_image = getattr(self, "ws_image", None)
- logging.info("Subdomain is", subdomain)
+ logging.info("Subdomain is %s", subdomain)
if ws_image:
# try getting the image + tag from values.yaml
ch_conf = conf.get_configuration()
@@ -262,13 +263,12 @@ def change_pod_manifest(self: KubeSpawner):
from pprint import pprint
pprint(self.storage_class)
-
# If there's a timeout, just let it propagate
asyncio.ensure_future(exponential_backoff(
- partial(
- self._make_create_pvc_request, pvc, self.k8s_api_request_timeout
- ),
- f'Could not create PVC {self.pvc_name}',
- # Each req should be given k8s_api_request_timeout seconds.
- timeout=self.k8s_api_request_retry_timeout,
- ))
+ partial(
+ self._make_create_pvc_request, pvc, self.k8s_api_request_timeout
+ ),
+ f'Could not create PVC {self.pvc_name}',
+ # Each req should be given k8s_api_request_timeout seconds.
+ timeout=self.k8s_api_request_retry_timeout,
+ ))
diff --git a/applications/jupyterhub/update.patch b/applications/jupyterhub/update.patch
new file mode 100644
index 000000000..5241525b2
--- /dev/null
+++ b/applications/jupyterhub/update.patch
@@ -0,0 +1,5845 @@
+diff --git a/applications/jupyterhub/README.md b/applications/jupyterhub/README.md
+index d961d03..d7d67d4 100755
+--- a/applications/jupyterhub/README.md
++++ b/applications/jupyterhub/README.md
+@@ -31,3 +31,13 @@ To support the pre pulling of task images see (https://github.com/MetaCell/cloud
+ the template `templates/image-puller/_helpers-daemonset.tpl` has been changed (see line 167 and on)
+
+ TODO: remember to implement/revise this code after you have updated/changed the templates of JupyterHub
++
++## How to update
++
++The helm chart is based on the [zero-to-jupyterhub](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/) helm chart.
++
++1. Run update.sh [TAG] # Do not use latest!
++2. Restore from the diff files with EDIT: CLOUDHARNESS
++
++Customize notebook image: quay.io/jupyterhub/k8s-singleuser-sample:[TAG]
++
+diff --git a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
+index d4b3cee..8ec801e 100755
+--- a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
++++ b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
+@@ -1,9 +1,17 @@
++# load the config object (satisfies linters)
++c = get_config() # noqa
++
++import glob
+ import os
+ import re
+ import sys
+-import logging
+
++from jupyterhub.utils import url_path_join
++from kubernetes_asyncio import client
+ from tornado.httpclient import AsyncHTTPClient
++
++#CLOUDHARNESS: EDIT START
++import logging
+ from kubernetes import client
+ from jupyterhub.utils import url_path_join
+
+@@ -12,7 +20,7 @@ try:
+ harness_hub() # activates harness hooks on jupyterhub
+ except Exception as e:
+ logging.error("could not import harness_jupyter", exc_info=True)
+-
++# CLOUDHARNESS: EDIT END
+
+ # Make sure that modules placed in the same directory as the jupyterhub config are added to the pythonpath
+ configuration_directory = os.path.dirname(os.path.realpath(__file__))
+@@ -20,39 +28,13 @@ sys.path.insert(0, configuration_directory)
+
+ from z2jh import (
+ get_config,
+- set_config_if_not_none,
+ get_name,
+ get_name_env,
+ get_secret_value,
++ set_config_if_not_none,
+ )
+
+
+-print('Base url is', c.JupyterHub.get('base_url', '/'))
+-
+-# Configure JupyterHub to use the curl backend for making HTTP requests,
+-# rather than the pure-python implementations. The default one starts
+-# being too slow to make a large number of requests to the proxy API
+-# at the rate required.
+-AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
+-
+-c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'
+-
+-# Connect to a proxy running in a different pod
+-c.ConfigurableHTTPProxy.api_url = 'http://{}:{}'.format(os.environ['PROXY_API_SERVICE_HOST'], int(os.environ['PROXY_API_SERVICE_PORT']))
+-c.ConfigurableHTTPProxy.should_start = False
+-
+-# Do not shut down user pods when hub is restarted
+-c.JupyterHub.cleanup_servers = False
+-
+-# Check that the proxy has routes appropriately setup
+-c.JupyterHub.last_activity_interval = 60
+-
+-# Don't wait at all before redirecting a spawning user to the progress page
+-c.JupyterHub.tornado_settings = {
+- 'slow_spawn_timeout': 0,
+-}
+-
+-
+ def camelCaseify(s):
+ """convert snake_case to camelCase
+
+@@ -173,6 +155,7 @@ for trait, cfg_key in (
+ ("events_enabled", "events"),
+ ("extra_labels", None),
+ ("extra_annotations", None),
++ # ("allow_privilege_escalation", None), # Managed manually below
+ ("uid", None),
+ ("fs_gid", None),
+ ("service_account", "serviceAccountName"),
+@@ -206,10 +189,19 @@ image = get_config("singleuser.image.name")
+ if image:
+ tag = get_config("singleuser.image.tag")
+ if tag:
+- image = "{}:{}".format(image, tag)
++ image = f"{image}:{tag}"
+
+ c.KubeSpawner.image = image
+
++# allow_privilege_escalation defaults to False in KubeSpawner 2+. Since its a
++# property where None, False, and True all are valid values that users of the
++# Helm chart may want to set, we can't use the set_config_if_not_none helper
++# function as someone may want to override the default False value to None.
++#
++c.KubeSpawner.allow_privilege_escalation = get_config(
++ "singleuser.allowPrivilegeEscalation"
++)
++
+ # Combine imagePullSecret.create (single), imagePullSecrets (list), and
+ # singleuser.image.pullSecrets (list).
+ image_pull_secrets = []
+@@ -255,7 +247,7 @@ if match_node_purpose:
+ pass
+ else:
+ raise ValueError(
+- "Unrecognized value for matchNodePurpose: %r" % match_node_purpose
++ f"Unrecognized value for matchNodePurpose: {match_node_purpose}"
+ )
+
+ # Combine the common tolerations for user pods with singleuser tolerations
+@@ -271,7 +263,7 @@ if storage_type == "dynamic":
+ pvc_name_template = get_config("singleuser.storage.dynamic.pvcNameTemplate")
+ c.KubeSpawner.pvc_name_template = pvc_name_template
+ volume_name_template = get_config("singleuser.storage.dynamic.volumeNameTemplate")
+- c.KubeSpawner.storage_pvc_ensure = False
++ c.KubeSpawner.storage_pvc_ensure = True
+ set_config_if_not_none(
+ c.KubeSpawner, "storage_class", "singleuser.storage.dynamic.storageClass"
+ )
+@@ -354,41 +346,62 @@ c.KubeSpawner.volume_mounts.extend(
+ )
+
+ c.JupyterHub.services = []
++c.JupyterHub.load_roles = []
+
++# jupyterhub-idle-culler's permissions are scoped to what it needs only, see
++# https://github.com/jupyterhub/jupyterhub-idle-culler#permissions.
++#
+ if get_config("cull.enabled", False):
++ jupyterhub_idle_culler_role = {
++ "name": "jupyterhub-idle-culler",
++ "scopes": [
++ "list:users",
++ "read:users:activity",
++ "read:servers",
++ "delete:servers",
++ # "admin:users", # dynamically added if --cull-users is passed
++ ],
++ # assign the role to a jupyterhub service, so it gains these permissions
++ "services": ["jupyterhub-idle-culler"],
++ }
++
+ cull_cmd = ["python3", "-m", "jupyterhub_idle_culler"]
+ base_url = c.JupyterHub.get("base_url", "/")
+ cull_cmd.append("--url=http://localhost:8081" + url_path_join(base_url, "hub/api"))
+
+ cull_timeout = get_config("cull.timeout")
+ if cull_timeout:
+- cull_cmd.append("--timeout=%s" % cull_timeout)
++ cull_cmd.append(f"--timeout={cull_timeout}")
+
+ cull_every = get_config("cull.every")
+ if cull_every:
+- cull_cmd.append("--cull-every=%s" % cull_every)
++ cull_cmd.append(f"--cull-every={cull_every}")
+
+ cull_concurrency = get_config("cull.concurrency")
+ if cull_concurrency:
+- cull_cmd.append("--concurrency=%s" % cull_concurrency)
++ cull_cmd.append(f"--concurrency={cull_concurrency}")
+
+ if get_config("cull.users"):
+ cull_cmd.append("--cull-users")
++ jupyterhub_idle_culler_role["scopes"].append("admin:users")
++
++ if not get_config("cull.adminUsers"):
++ cull_cmd.append("--cull-admin-users=false")
+
+ if get_config("cull.removeNamedServers"):
+ cull_cmd.append("--remove-named-servers")
+
+ cull_max_age = get_config("cull.maxAge")
+ if cull_max_age:
+- cull_cmd.append("--max-age=%s" % cull_max_age)
++ cull_cmd.append(f"--max-age={cull_max_age}")
+
+ c.JupyterHub.services.append(
+ {
+- "name": "cull-idle",
+- "admin": True,
++ "name": "jupyterhub-idle-culler",
+ "command": cull_cmd,
+ }
+ )
++ c.JupyterHub.load_roles.append(jupyterhub_idle_culler_role)
+
+ for key, service in get_config("hub.services", {}).items():
+ # c.JupyterHub.services is a list of dicts, but
+@@ -402,26 +415,44 @@ for key, service in get_config("hub.services", {}).items():
+
+ c.JupyterHub.services.append(service)
+
++for key, role in get_config("hub.loadRoles", {}).items():
++ # c.JupyterHub.load_roles is a list of dicts, but
++ # hub.loadRoles is a dict of dicts to make the config mergable
++ role.setdefault("name", key)
++
++ c.JupyterHub.load_roles.append(role)
++
++# respect explicit null command (distinct from unspecified)
++# this avoids relying on KubeSpawner.cmd's default being None
++_unspecified = object()
++specified_cmd = get_config("singleuser.cmd", _unspecified)
++if specified_cmd is not _unspecified:
++ c.Spawner.cmd = specified_cmd
+
+-set_config_if_not_none(c.Spawner, "cmd", "singleuser.cmd")
+ set_config_if_not_none(c.Spawner, "default_url", "singleuser.defaultUrl")
+
+-cloud_metadata = get_config("singleuser.cloudMetadata", {})
++cloud_metadata = get_config("singleuser.cloudMetadata")
+
+ if cloud_metadata.get("blockWithIptables") == True:
+ # Use iptables to block access to cloud metadata by default
+ network_tools_image_name = get_config("singleuser.networkTools.image.name")
+ network_tools_image_tag = get_config("singleuser.networkTools.image.tag")
++ network_tools_resources = get_config("singleuser.networkTools.resources")
++ ip = cloud_metadata["ip"]
+ ip_block_container = client.V1Container(
+ name="block-cloud-metadata",
+ image=f"{network_tools_image_name}:{network_tools_image_tag}",
+ command=[
+ "iptables",
+- "-A",
++ "--append",
+ "OUTPUT",
+- "-d",
+- cloud_metadata.get("ip", "169.254.169.254"),
+- "-j",
++ "--protocol",
++ "tcp",
++ "--destination",
++ ip,
++ "--destination-port",
++ "80",
++ "--jump",
+ "DROP",
+ ],
+ security_context=client.V1SecurityContext(
+@@ -429,6 +460,7 @@ if cloud_metadata.get("blockWithIptables") == True:
+ run_as_user=0,
+ capabilities=client.V1Capabilities(add=["NET_ADMIN"]),
+ ),
++ resources=network_tools_resources,
+ )
+
+ c.KubeSpawner.init_containers.append(ip_block_container)
+@@ -438,17 +470,6 @@ if get_config("debug.enabled", False):
+ c.JupyterHub.log_level = "DEBUG"
+ c.Spawner.debug = True
+
+-# load /usr/local/etc/jupyterhub/jupyterhub_config.d config files
+-config_dir = "/usr/local/etc/jupyterhub/jupyterhub_config.d"
+-if os.path.isdir(config_dir):
+- for file_path in sorted(glob.glob(f"{config_dir}/*.py")):
+- file_name = os.path.basename(file_path)
+- print(f"Loading {config_dir} config: {file_name}")
+- with open(file_path) as f:
+- file_content = f.read()
+- # compiling makes debugging easier: https://stackoverflow.com/a/437857
+- exec(compile(source=file_content, filename=file_name, mode="exec"))
+-
+ # load potentially seeded secrets
+ #
+ # NOTE: ConfigurableHTTPProxy.auth_token is set through an environment variable
+@@ -471,11 +492,23 @@ for app, cfg in get_config("hub.config", {}).items():
+ cfg.pop("keys", None)
+ c[app].update(cfg)
+
++# load /usr/local/etc/jupyterhub/jupyterhub_config.d config files
++config_dir = "/usr/local/etc/jupyterhub/jupyterhub_config.d"
++if os.path.isdir(config_dir):
++ for file_path in sorted(glob.glob(f"{config_dir}/*.py")):
++ file_name = os.path.basename(file_path)
++ print(f"Loading {config_dir} config: {file_name}")
++ with open(file_path) as f:
++ file_content = f.read()
++ # compiling makes debugging easier: https://stackoverflow.com/a/437857
++ exec(compile(source=file_content, filename=file_name, mode="exec"))
++
+ # execute hub.extraConfig entries
+ for key, config_py in sorted(get_config("hub.extraConfig", {}).items()):
+- print("Loading extra config: %s" % key)
++ print(f"Loading extra config: {key}")
+ exec(config_py)
+
++# CLOUDHARNESS: EDIT START
+ # Allow switching authenticators easily
+ auth_type = get_config('hub.config.JupyterHub.authenticator_class')
+ email_domain = 'local'
+@@ -525,4 +558,5 @@ set_config_if_not_none(c.Authenticator, 'whitelist', 'auth.whitelist.users')
+ c.apps = get_config('apps')
+ c.registry = get_config('registry')
+ c.domain = get_config('root.domain')
+-c.namespace = get_config('root.namespace')
+\ No newline at end of file
++c.namespace = get_config('root.namespace')
++# CLOUDHARNESS: EDIT END
+\ No newline at end of file
+diff --git a/applications/jupyterhub/deploy/resources/hub/z2jh.py b/applications/jupyterhub/deploy/resources/hub/z2jh.py
+index 834a6b6..fc368f6 100755
+--- a/applications/jupyterhub/deploy/resources/hub/z2jh.py
++++ b/applications/jupyterhub/deploy/resources/hub/z2jh.py
+@@ -3,15 +3,15 @@ Utility methods for use in jupyterhub_config.py and dynamic subconfigs.
+
+ Methods here can be imported by extraConfig in values.yaml
+ """
+-from collections import Mapping
+-from functools import lru_cache
+ import os
+-import re
++from collections.abc import Mapping
++from functools import lru_cache
+
+ import yaml
+
++
+ # memoize so we only load config once
+-@lru_cache()
++@lru_cache
+ def _load_config():
+ """Load the Helm chart configuration used to render the Helm templates of
+ the chart from a mounted k8s Secret, and merge in values from an optionally
+@@ -27,6 +27,7 @@ def _load_config():
+ cfg = _merge_dictionaries(cfg, values)
+ else:
+ print(f"No config at {path}")
++ # EDIT: CLOUDHARNESS START
+ path = f"/opt/cloudharness/resources/allvalues.yaml"
+ if os.path.exists(path):
+ print("Loading global CloudHarness config at", path)
+@@ -34,11 +35,11 @@ def _load_config():
+ values = yaml.safe_load(f)
+ cfg = _merge_dictionaries(cfg, values)
+ cfg['root'] = values
+-
++ # EDIT: CLOUDHARNESS END
+ return cfg
+
+
+-@lru_cache()
++@lru_cache
+ def _get_config_value(key):
+ """Load value from the k8s ConfigMap given a key."""
+
+@@ -50,7 +51,7 @@ def _get_config_value(key):
+ raise Exception(f"{path} not found!")
+
+
+-@lru_cache()
++@lru_cache
+ def get_secret_value(key, default="never-explicitly-set"):
+ """Load value from the user managed k8s Secret or the default k8s Secret
+ given a key."""
+@@ -117,7 +118,7 @@ def get_config(key, default=None):
+ else:
+ value = value[level]
+
+-
++ # EDIT: CLOUDHARNESS START
+ if value and isinstance(value, str):
+ replace_var = re.search("{{.*?}}", value)
+ if replace_var:
+@@ -128,6 +129,7 @@ def get_config(key, default=None):
+ if repl:
+ print("replace", variable, "in", value, ":", repl)
+ value = re.sub("{{.*?}}", repl, value)
++ # EDIT: CLOUDHARNESS END
+ return value
+
+
+@@ -137,6 +139,5 @@ def set_config_if_not_none(cparent, name, key):
+ configuration item if not None
+ """
+ data = get_config(key)
+-
+ if data is not None:
+- setattr(cparent, name, data)
+\ No newline at end of file
++ setattr(cparent, name, data)
+diff --git a/applications/jupyterhub/deploy/templates/NOTES.txt b/applications/jupyterhub/deploy/templates/NOTES.txt
+new file mode 100644
+index 0000000..9769a9c
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/NOTES.txt
+@@ -0,0 +1,158 @@
++{{- $proxy_service := include "jupyterhub.proxy-public.fullname" . -}}
++
++{{- /* Generated with https://patorjk.com/software/taag/#p=display&h=0&f=Slant&t=JupyterHub */}}
++. __ __ __ __ __
++ / / __ __ ____ __ __ / /_ ___ _____ / / / / __ __ / /_
++ __ / / / / / / / __ \ / / / / / __/ / _ \ / ___/ / /_/ / / / / / / __ \
++/ /_/ / / /_/ / / /_/ / / /_/ / / /_ / __/ / / / __ / / /_/ / / /_/ /
++\____/ \__,_/ / .___/ \__, / \__/ \___/ /_/ /_/ /_/ \__,_/ /_.___/
++ /_/ /____/
++
++ You have successfully installed the official JupyterHub Helm chart!
++
++### Installation info
++
++ - Kubernetes namespace: {{ .Release.Namespace }}
++ - Helm release name: {{ .Release.Name }}
++ - Helm chart version: {{ .Chart.Version }}
++ - JupyterHub version: {{ .Chart.AppVersion }}
++ - Hub pod packages: See https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/{{ include "jupyterhub.chart-version-to-git-ref" .Chart.Version }}/images/hub/requirements.txt
++
++### Followup links
++
++ - Documentation: https://z2jh.jupyter.org
++ - Help forum: https://discourse.jupyter.org
++ - Social chat: https://gitter.im/jupyterhub/jupyterhub
++ - Issue tracking: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues
++
++### Post-installation checklist
++
++ - Verify that created Pods enter a Running state:
++
++ kubectl --namespace={{ .Release.Namespace }} get pod
++
++ If a pod is stuck with a Pending or ContainerCreating status, diagnose with:
++
++ kubectl --namespace={{ .Release.Namespace }} describe pod
++
++ If a pod keeps restarting, diagnose with:
++
++ kubectl --namespace={{ .Release.Namespace }} logs --previous
++ {{- println }}
++
++ {{- if eq .Values.apps.jupyterhub.proxy.service.type "LoadBalancer" }}
++ - Verify an external IP is provided for the k8s Service {{ $proxy_service }}.
++
++ kubectl --namespace={{ .Release.Namespace }} get service {{ $proxy_service }}
++
++ If the external ip remains , diagnose with:
++
++ kubectl --namespace={{ .Release.Namespace }} describe service {{ $proxy_service }}
++ {{- end }}
++
++ - Verify web based access:
++ {{- println }}
++ {{- if .Values.apps.jupyterhub.ingress.enabled }}
++ {{- range $host := .Values.apps.jupyterhub.ingress.hosts }}
++ Try insecure HTTP access: http://{{ $host }}{{ $.Values.apps.jupyterhub.hub.baseUrl | trimSuffix "/" }}/
++ {{- end }}
++
++ {{- range $tls := .Values.apps.jupyterhub.ingress.tls }}
++ {{- range $host := $tls.hosts }}
++ Try secure HTTPS access: https://{{ $host }}{{ $.Values.apps.jupyterhub.hub.baseUrl | trimSuffix "/" }}/
++ {{- end }}
++ {{- end }}
++ {{- else }}
++ You have not configured a k8s Ingress resource so you need to access the k8s
++ Service {{ $proxy_service }} directly.
++ {{- println }}
++
++ {{- if eq .Values.apps.jupyterhub.proxy.service.type "NodePort" }}
++ The k8s Service {{ $proxy_service }} is exposed via NodePorts. That means
++ that all the k8s cluster's nodes are exposing the k8s Service via those
++ ports.
++
++ Try insecure HTTP access: http://:{{ .Values.apps.jupyterhub.proxy.service.nodePorts.http | default "no-http-nodeport-set"}}
++ Try secure HTTPS access: https://:{{ .Values.apps.jupyterhub.proxy.service.nodePorts.https | default "no-https-nodeport-set" }}
++
++ {{- else }}
++ If your computer is outside the k8s cluster, you can port-forward traffic to
++ the k8s Service {{ $proxy_service }} with kubectl to access it from your
++ computer.
++
++ kubectl --namespace={{ .Release.Namespace }} port-forward service/{{ $proxy_service }} 8080:http
++
++ Try insecure HTTP access: http://localhost:8080
++ {{- end }}
++ {{- end }}
++ {{- println }}
++
++
++
++
++
++{{- /*
++ Warnings for likely misconfigurations
++*/}}
++
++{{- if and (not .Values.apps.jupyterhub.scheduling.podPriority.enabled) (and .Values.apps.jupyterhub.scheduling.userPlaceholder.enabled .Values.apps.jupyterhub.scheduling.userPlaceholder.replicas) }}
++#################################################################################
++###### WARNING: You are using user placeholders without pod priority #####
++###### enabled*, either enable pod priority or stop using the #####
++###### user placeholders** to avoid having placeholders that #####
++###### refuse to make room for a real user. #####
++###### #####
++###### *scheduling.podPriority.enabled #####
++###### **scheduling.userPlaceholder.enabled #####
++###### **scheduling.userPlaceholder.replicas #####
++#################################################################################
++{{- println }}
++{{- end }}
++
++
++
++
++
++{{- /*
++ Breaking changes and failures for likely misconfigurations.
++*/}}
++
++{{- $breaking := "" }}
++{{- $breaking_title := "\n" }}
++{{- $breaking_title = print $breaking_title "\n#################################################################################" }}
++{{- $breaking_title = print $breaking_title "\n###### BREAKING: The config values passed contained no longer accepted #####" }}
++{{- $breaking_title = print $breaking_title "\n###### options. See the messages below for more details. #####" }}
++{{- $breaking_title = print $breaking_title "\n###### #####" }}
++{{- $breaking_title = print $breaking_title "\n###### To verify your updated config is accepted, you can use #####" }}
++{{- $breaking_title = print $breaking_title "\n###### the `helm template` command. #####" }}
++{{- $breaking_title = print $breaking_title "\n#################################################################################" }}
++
++
++{{- /*
++ This is an example (in a helm template comment) on how to detect and
++ communicate with regards to a breaking chart config change.
++
++ {{- if hasKey .Values.apps.jupyterhub.singleuser.cloudMetadata "enabled" }}
++ {{- $breaking = print $breaking "\n\nCHANGED: singleuser.cloudMetadata.enabled must as of 1.0.0 be configured using singleuser.cloudMetadata.blockWithIptables with the opposite value." }}
++ {{- end }}
++*/}}
++
++
++{{- if hasKey .Values.apps.jupyterhub.rbac "enabled" }}
++{{- $breaking = print $breaking "\n\nCHANGED: rbac.enabled must as of version 2.0.0 be configured via rbac.create and .serviceAccount.create." }}
++{{- end }}
++
++
++{{- if hasKey .Values.apps.jupyterhub.hub "fsGid" }}
++{{- $breaking = print $breaking "\n\nCHANGED: hub.fsGid must as of version 2.0.0 be configured via hub.podSecurityContext.fsGroup." }}
++{{- end }}
++
++
++{{- if and .Values.apps.jupyterhub.singleuser.cloudMetadata.blockWithIptables (and .Values.apps.jupyterhub.singleuser.networkPolicy.enabled .Values.apps.jupyterhub.singleuser.networkPolicy.egressAllowRules.cloudMetadataServer) }}
++{{- $breaking = print $breaking "\n\nCHANGED: singleuser.cloudMetadata.blockWithIptables must as of version 3.0.0 not be configured together with singleuser.networkPolicy.egressAllowRules.cloudMetadataServer as it leads to an ambiguous configuration." }}
++{{- end }}
++
++
++{{- if $breaking }}
++{{- fail (print $breaking_title $breaking "\n\n") }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl b/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
+index b742a12..3159d10 100644
+--- a/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
++++ b/applications/jupyterhub/deploy/templates/_helpers-auth-rework.tpl
+@@ -168,30 +168,30 @@ ldap.dn.user.useLookupName: LDAPAuthenticator.use_lookup_dn_username
+ {{- $c := dict }}
+ {{- $result := (dict "hub" (dict "config" $c)) }}
+ {{- /*
+- Flattens the config in .Values.apps.jupyterhub.auth to a format of
++ Flattens the config in .Values.apps.jupyterhub.apps.jupyterhub.auth to a format of
+ "keyX.keyY...": "value". Writes output to $c.
+ */}}
+- {{- include "jupyterhub.flattenDict" (list $c (omit .Values.apps.jupyterhub.auth "type" "custom")) }}
++ {{- include "jupyterhub.flattenDict" (list $c (omit .Values.apps.jupyterhub.apps.jupyterhub.auth "type" "custom")) }}
+
+ {{- /*
+ Transform the flattened config using a dictionary
+ representing the old z2jh config, output the result
+ in $c.
+ */}}
+- {{- include "jupyterhub.authDep.remapOldToNew.mappable" (list $c .Values.apps.jupyterhub.global.safeToShowValues) }}
++ {{- include "jupyterhub.authDep.remapOldToNew.mappable" (list $c .Values.apps.jupyterhub.apps.jupyterhub.global.safeToSho.Values.apps.jupyterhub. }}
+
+- {{- $class_old_config_key := .Values.apps.jupyterhub.auth.type | default "" }} {{- /* ldap - github */}}
++ {{- $class_old_config_key := .Values.apps.jupyterhub.apps.jupyterhub.auth.type | default "" }} {{- /* ldap - github */}}
+ {{- $class_new_entrypoint := "" }} {{- /* ldapauthenticator.LDAPAuthenticator - github */}}
+ {{- $class_new_config_key := "" }} {{- /* LDAPAuthenticator - GitHubOAuthenticator */}}
+
+ {{- /* SET $class_new_entrypoint, $class_new_config_key */}}
+ {{- if eq $class_old_config_key "custom" }}
+- {{- $class_new_entrypoint = .Values.apps.jupyterhub.auth.custom.className | default "custom.className wasn't configured!" }}
++ {{- $class_new_entrypoint = .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.className | default "custom.className wasn't configured!" }}
+ {{- $class_new_config_key = $class_new_entrypoint | splitList "." | last }}
+ {{- /* UPDATE c dict explicitly with auth.custom.config */}}
+- {{- if .Values.apps.jupyterhub.auth.custom.config }}
+- {{- $custom_config := merge (dict) .Values.apps.jupyterhub.auth.custom.config }}
+- {{- if not .Values.apps.jupyterhub.global.safeToShowValues }}
++ {{- if .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.config }}
++ {{- $custom_config := merge (dict) .Values.apps.jupyterhub.apps.jupyterhub.auth.custom.config }}
++ {{- if not .Values.apps.jupyterhub.apps.jupyterhub.global.safeToSho.Values.apps.jupyterhub.}}
+ {{- range $key, $val := $custom_config }}
+ {{- $_ := set $custom_config $key "***" }}
+ {{- end }}
+@@ -213,7 +213,7 @@ The JupyterHub Helm chart's auth config has been reworked and requires changes.
+
+ The new way to configure authentication in chart version 0.11.0+ is printed
+ below for your convenience. The values are not shown by default to ensure no
+-secrets are exposed, run helm upgrade with --set global.safeToShowValues=true
++secrets are exposed, run helm upgrade with --set global.safeToSho.Values.apps.jupyterhub.true
+ to show them.
+
+ {{ $result | toYaml }}
+diff --git a/applications/jupyterhub/deploy/templates/_helpers-names.tpl b/applications/jupyterhub/deploy/templates/_helpers-names.tpl
+index e9cf7bb..401d601 100644
+--- a/applications/jupyterhub/deploy/templates/_helpers-names.tpl
++++ b/applications/jupyterhub/deploy/templates/_helpers-names.tpl
+@@ -3,8 +3,8 @@
+ parent charts to reference these dynamic resource names.
+
+ To avoid duplicating documentation, for more information, please see the the
+- fullnameOverride entry in schema.yaml or the configuration reference that
+- schema.yaml renders to.
++ fullnameOverride entry in values.schema.yaml or the configuration reference
++ that values.schema.yaml renders to.
+
+ https://z2jh.jupyter.org/en/latest/resources/reference.html#fullnameOverride
+ */}}
+@@ -38,8 +38,8 @@
+ {{- $name_override := .Values.apps.jupyterhub.nameOverride }}
+ {{- if ne .Chart.Name "jupyterhub" }}
+ {{- if .Values.apps.jupyterhub.jupyterhub }}
+- {{- $fullname_override = .Values.apps.jupyterhub.fullnameOverride }}
+- {{- $name_override = .Values.apps.jupyterhub.nameOverride }}
++ {{- $fullname_override = .Values.apps.jupyterhub.jupyterhub.fullnameOverride }}
++ {{- $name_override = .Values.apps.jupyterhub.jupyterhub.nameOverride }}
+ {{- end }}
+ {{- end }}
+
+@@ -76,12 +76,23 @@
+ {{- include "jupyterhub.fullname.dash" . }}hub
+ {{- end }}
+
++{{- /* hub-serviceaccount ServiceAccount */}}
++{{- define "jupyterhub.hub-serviceaccount.fullname" -}}
++ {{- if .Values.apps.jupyterhub.hub.serviceAccount.create }}
++ {{- .Values.apps.jupyterhub.hub.serviceAccount.name | default (include "jupyterhub.hub.fullname" .) }}
++ {{- else }}
++ {{- .Values.apps.jupyterhub.hub.serviceAccount.name | default "default" }}
++ {{- end }}
++{{- end }}
++
+ {{- /* hub-existing-secret Secret */}}
+ {{- define "jupyterhub.hub-existing-secret.fullname" -}}
+ {{- /* A hack to avoid issues from invoking this from a parent Helm chart. */}}
+ {{- $existing_secret := .Values.apps.jupyterhub.hub.existingSecret }}
+ {{- if ne .Chart.Name "jupyterhub" }}
+- {{- $existing_secret = .Values.apps.jupyterhub.hub.existingSecret }}
++ {{- if .Values.apps.jupyterhub.jupyterhub }}
++ {{- $existing_secret = .Values.apps.jupyterhub.jupyterhub.hub.existingSecret }}
++ {{- end }}
+ {{- end }}
+ {{- if $existing_secret }}
+ {{- $existing_secret }}
+@@ -133,11 +144,29 @@
+ {{- include "jupyterhub.fullname.dash" . }}autohttps
+ {{- end }}
+
++{{- /* autohttps-serviceaccount ServiceAccount */}}
++{{- define "jupyterhub.autohttps-serviceaccount.fullname" -}}
++ {{- if .Values.apps.jupyterhub.proxy.traefik.serviceAccount.create }}
++ {{- .Values.apps.jupyterhub.proxy.traefik.serviceAccount.name | default (include "jupyterhub.autohttps.fullname" .) }}
++ {{- else }}
++ {{- .Values.apps.jupyterhub.proxy.traefik.serviceAccount.name | default "default" }}
++ {{- end }}
++{{- end }}
++
+ {{- /* user-scheduler Deployment */}}
+ {{- define "jupyterhub.user-scheduler-deploy.fullname" -}}
+ {{- include "jupyterhub.fullname.dash" . }}user-scheduler
+ {{- end }}
+
++{{- /* user-scheduler-serviceaccount ServiceAccount */}}
++{{- define "jupyterhub.user-scheduler-serviceaccount.fullname" -}}
++ {{- if .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.create }}
++ {{- .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.name | default (include "jupyterhub.user-scheduler-deploy.fullname" .) }}
++ {{- else }}
++ {{- .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.name | default "default" }}
++ {{- end }}
++{{- end }}
++
+ {{- /* user-scheduler leader election lock resource */}}
+ {{- define "jupyterhub.user-scheduler-lock.fullname" -}}
+ {{- include "jupyterhub.user-scheduler-deploy.fullname" . }}-lock
+@@ -153,6 +182,15 @@
+ {{- include "jupyterhub.fullname.dash" . }}hook-image-awaiter
+ {{- end }}
+
++{{- /* image-awaiter-serviceaccount ServiceAccount */}}
++{{- define "jupyterhub.hook-image-awaiter-serviceaccount.fullname" -}}
++ {{- if .Values.apps.jupyterhub.prePuller.hook.serviceAccount.create }}
++ {{- .Values.apps.jupyterhub.prePuller.hook.serviceAccount.name | default (include "jupyterhub.hook-image-awaiter.fullname" .) }}
++ {{- else }}
++ {{- .Values.apps.jupyterhub.prePuller.hook.serviceAccount.name | default "default" }}
++ {{- end }}
++{{- end }}
++
+ {{- /* hook-image-puller DaemonSet */}}
+ {{- define "jupyterhub.hook-image-puller.fullname" -}}
+ {{- include "jupyterhub.fullname.dash" . }}hook-image-puller
+@@ -210,6 +248,15 @@
+ {{- end }}
+ {{- end }}
+
++{{- /* image-puller Priority */}}
++{{- define "jupyterhub.image-puller-priority.fullname" -}}
++ {{- if (include "jupyterhub.fullname" .) }}
++ {{- include "jupyterhub.fullname.dash" . }}image-puller
++ {{- else }}
++ {{- .Release.Name }}-image-puller-priority
++ {{- end }}
++{{- end }}
++
+ {{- /* user-scheduler's registered name */}}
+ {{- define "jupyterhub.user-scheduler.fullname" -}}
+ {{- if (include "jupyterhub.fullname" .) }}
+@@ -231,6 +278,7 @@
+ fullname: {{ include "jupyterhub.fullname" . | quote }}
+ fullname-dash: {{ include "jupyterhub.fullname.dash" . | quote }}
+ hub: {{ include "jupyterhub.hub.fullname" . | quote }}
++hub-serviceaccount: {{ include "jupyterhub.hub-serviceaccount.fullname" . | quote }}
+ hub-existing-secret: {{ include "jupyterhub.hub-existing-secret.fullname" . | quote }}
+ hub-existing-secret-or-default: {{ include "jupyterhub.hub-existing-secret-or-default.fullname" . | quote }}
+ hub-pvc: {{ include "jupyterhub.hub-pvc.fullname" . | quote }}
+@@ -241,10 +289,14 @@ proxy-public: {{ include "jupyterhub.proxy-public.fullname" . | quote }}
+ proxy-public-tls: {{ include "jupyterhub.proxy-public-tls.fullname" . | quote }}
+ proxy-public-manual-tls: {{ include "jupyterhub.proxy-public-manual-tls.fullname" . | quote }}
+ autohttps: {{ include "jupyterhub.autohttps.fullname" . | quote }}
++autohttps-serviceaccount: {{ include "jupyterhub.autohttps-serviceaccount.fullname" . | quote }}
+ user-scheduler-deploy: {{ include "jupyterhub.user-scheduler-deploy.fullname" . | quote }}
++user-scheduler-serviceaccount: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . | quote }}
+ user-scheduler-lock: {{ include "jupyterhub.user-scheduler-lock.fullname" . | quote }}
+ user-placeholder: {{ include "jupyterhub.user-placeholder.fullname" . | quote }}
++image-puller-priority: {{ include "jupyterhub.image-puller-priority.fullname" . | quote }}
+ hook-image-awaiter: {{ include "jupyterhub.hook-image-awaiter.fullname" . | quote }}
++hook-image-awaiter-serviceaccount: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . | quote }}
+ hook-image-puller: {{ include "jupyterhub.hook-image-puller.fullname" . | quote }}
+ continuous-image-puller: {{ include "jupyterhub.continuous-image-puller.fullname" . | quote }}
+ singleuser: {{ include "jupyterhub.singleuser.fullname" . | quote }}
+diff --git a/applications/jupyterhub/deploy/templates/_helpers-netpol.tpl b/applications/jupyterhub/deploy/templates/_helpers-netpol.tpl
+new file mode 100644
+index 0000000..4075569
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/_helpers-netpol.tpl
+@@ -0,0 +1,101 @@
++{{- /*
++ This named template renders egress rules for NetworkPolicy resources based on
++ common configuration.
++
++ It is rendering based on the `egressAllowRules` and `egress` keys of the
++ passed networkPolicy config object. Each flag set to true under
++ `egressAllowRules` is rendered to a egress rule that next to any custom user
++ defined rules from the `egress` config.
++
++ This named template needs to render based on a specific networkPolicy
++ resource, but also needs access to the root context. Due to that, it
++ accepts a list as its scope, where the first element is supposed to be the
++ root context and the second element is supposed to be the networkPolicy
++ configuration object.
++
++ As an example, this is how you would render this named template from a
++ NetworkPolicy resource under its egress:
++
++ egress:
++ # other rules here...
++
++ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.hub.networkPolicy)) }}
++ {{- . | nindent 4 }}
++ {{- end }}
++
++ Note that the reference to privateIPs and nonPrivateIPs relate to
++ https://en.wikipedia.org/wiki/Private_network#Private_IPv4_addresses.
++*/}}
++
++{{- define "jupyterhub.networkPolicy.renderEgressRules" -}}
++{{- $root := index . 0 }}
++{{- $netpol := index . 1 }}
++{{- if or (or $netpol.egressAllowRules.dnsPortsCloudMetadataServer $netpol.egressAllowRules.dnsPortsKubeSystemNamespace) $netpol.egressAllowRules.dnsPortsPrivateIPs }}
++- ports:
++ - port: 53
++ protocol: UDP
++ - port: 53
++ protocol: TCP
++ to:
++ {{- if $netpol.egressAllowRules.dnsPortsCloudMetadataServer }}
++ # Allow outbound connections to DNS ports on the cloud metadata server
++ - ipBlock:
++ cidr: {{ $root.Values.apps.jupyterhub.singleuser.cloudMetadata.ip }}/32
++ {{- end }}
++ {{- if $netpol.egressAllowRules.dnsPortsKubeSystemNamespace }}
++ # Allow outbound connections to DNS ports on pods in the kube-system
++ # namespace
++ - namespaceSelector:
++ matchLabels:
++ kubernetes.io/metadata.name: kube-system
++ {{- end }}
++ {{- if $netpol.egressAllowRules.dnsPortsPrivateIPs }}
++ # Allow outbound connections to DNS ports on destinations in the private IP
++ # ranges
++ - ipBlock:
++ cidr: 10.0.0.0/8
++ - ipBlock:
++ cidr: 172.16.0.0/12
++ - ipBlock:
++ cidr: 192.168.0.0/16
++ {{- end }}
++{{- end }}
++
++{{- if $netpol.egressAllowRules.nonPrivateIPs }}
++# Allow outbound connections to non-private IP ranges
++- to:
++ - ipBlock:
++ cidr: 0.0.0.0/0
++ except:
++ # As part of this rule:
++ # - don't allow outbound connections to private IPs
++ - 10.0.0.0/8
++ - 172.16.0.0/12
++ - 192.168.0.0/16
++ # - don't allow outbound connections to the cloud metadata server
++ - {{ $root.Values.apps.jupyterhub.singleuser.cloudMetadata.ip }}/32
++{{- end }}
++
++{{- if $netpol.egressAllowRules.privateIPs }}
++# Allow outbound connections to private IP ranges
++- to:
++ - ipBlock:
++ cidr: 10.0.0.0/8
++ - ipBlock:
++ cidr: 172.16.0.0/12
++ - ipBlock:
++ cidr: 192.168.0.0/16
++{{- end }}
++
++{{- if $netpol.egressAllowRules.cloudMetadataServer }}
++# Allow outbound connections to the cloud metadata server
++- to:
++ - ipBlock:
++ cidr: {{ $root.Values.apps.jupyterhub.singleuser.cloudMetadata.ip }}/32
++{{- end }}
++
++{{- with $netpol.egress }}
++# Allow outbound connections based on user specified rules
++{{ . | toYaml }}
++{{- end }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/_helpers.tpl b/applications/jupyterhub/deploy/templates/_helpers.tpl
+index efea86d..a202363 100755
+--- a/applications/jupyterhub/deploy/templates/_helpers.tpl
++++ b/applications/jupyterhub/deploy/templates/_helpers.tpl
+@@ -12,7 +12,7 @@
+
+ When you ask a helper to render its content, one often forward the current
+ scope to the helper in order to allow it to access .Release.Name,
+- .Values.apps.jupyterhub.rbac.enabled and similar values.
++ .Values.apps.jupyterhub.rbac.create and similar values.
+
+ #### Example - Passing the current scope
+ {{ include "jupyterhub.commonLabels" . }}
+@@ -180,8 +180,51 @@ component: {{ include "jupyterhub.componentLabel" . }}
+ Augments passed .pullSecrets with $.Values.apps.jupyterhub.imagePullSecrets
+ */}}
+ {{- define "jupyterhub.imagePullSecrets" -}}
++ {{- /*
++ We have implemented a trick to allow a parent chart depending on this
++ chart to call this named templates.
++
++ Caveats and notes:
++
++ 1. While parent charts can reference these, grandparent charts can't.
++ 2. Parent charts must not use an alias for this chart.
++ 3. There is no failsafe workaround to above due to
++ https://github.com/helm/helm/issues/9214.
++ 4. .Chart is of its own type (*chart.Metadata) and needs to be casted
++ using "toYaml | fromYaml" in order to be able to use normal helm
++ template functions on it.
++ */}}
++  {{- $jupyterhub_values := .root.Values.apps.jupyterhub }}
++ {{- if ne .root.Chart.Name "jupyterhub" }}
++ {{- if .root.Values.apps.jupyterhub.jupyterhub }}
++ {{- $jupyterhub_values = .root.Values.apps.jupyterhub.jupyterhub }}
++ {{- end }}
++ {{- end }}
+
++ {{- /* Populate $_.list with all relevant entries */}}
++ {{- $_ := dict "list" (concat .image.pullSecrets $jupyterhub_values.imagePullSecrets | uniq) }}
++ {{- if and $jupyterhub_values.imagePullSecret.create $jupyterhub_values.imagePullSecret.automaticReferenceInjection }}
++ {{- $__ := set $_ "list" (append $_.list (include "jupyterhub.image-pull-secret.fullname" .root) | uniq) }}
++ {{- end }}
+
++ {{- /* Decide if something should be written */}}
++ {{- if not (eq ($_.list | toJson) "[]") }}
++
++ {{- /* Process the $_.list where strings become dicts with a name key and the
++ strings become the name keys' values into $_.res */}}
++ {{- $_ := set $_ "res" list }}
++ {{- range $_.list }}
++ {{- if eq (typeOf .) "string" }}
++ {{- $__ := set $_ "res" (append $_.res (dict "name" .)) }}
++ {{- else }}
++ {{- $__ := set $_ "res" (append $_.res .) }}
++ {{- end }}
++ {{- end }}
++
++ {{- /* Write the results */}}
++ {{- $_.res | toJson }}
++
++ {{- end }}
+ {{- end }}
+
+ {{- /*
+@@ -339,3 +382,21 @@ limits:
+ {{- print "\n\nextraFiles entries (" $file_key ") must only contain one of the fields: 'data', 'stringData', and 'binaryData'." | fail }}
+ {{- end }}
+ {{- end }}
++
++{{- /*
++ jupyterhub.chart-version-to-git-ref:
++ Renders a valid git reference from a chartpress generated version string.
++ In practice, either a git tag or a git commit hash will be returned.
++
++ - The version string will follow a chartpress pattern, see
++ https://github.com/jupyterhub/chartpress#examples-chart-versions-and-image-tags.
++
++ - The regexReplaceAll function is a sprig library function, see
++ https://masterminds.github.io/sprig/strings.html.
++
++ - The regular expression is in golang syntax, but \d had to become \\d for
++ example.
++*/}}
++{{- define "jupyterhub.chart-version-to-git-ref" -}}
++{{- regexReplaceAll ".*[.-]n\\d+[.]h(.*)" . "${1}" }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/hub/configmap.yaml b/applications/jupyterhub/deploy/templates/hub/configmap.yaml
+index c913f67..f52feb6 100755
+--- a/applications/jupyterhub/deploy/templates/hub/configmap.yaml
++++ b/applications/jupyterhub/deploy/templates/hub/configmap.yaml
+@@ -29,5 +29,6 @@ data:
+ */}}
+ checksum_hook-image-puller: {{ include "jupyterhub.imagePuller.daemonset.hook.checksum" . | quote }}
+
++ # EDIT: CLOUDHARNESS
+ allvalues.yaml: |
+ {{- .Values | toYaml | nindent 4 }}
+\ No newline at end of file
+diff --git a/applications/jupyterhub/deploy/templates/hub/deployment.yaml b/applications/jupyterhub/deploy/templates/hub/deployment.yaml
+index 82132c6..d105ecc 100755
+--- a/applications/jupyterhub/deploy/templates/hub/deployment.yaml
++++ b/applications/jupyterhub/deploy/templates/hub/deployment.yaml
+@@ -5,6 +5,9 @@ metadata:
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ spec:
++ {{- if typeIs "int" .Values.apps.jupyterhub.hub.revisionHistoryLimit }}
++ revisionHistoryLimit: {{ .Values.apps.jupyterhub.hub.revisionHistoryLimit }}
++ {{- end }}
+ replicas: 1
+ selector:
+ matchLabels:
+@@ -30,11 +33,14 @@ spec:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ spec:
+-{{ include "deploy_utils.etcHosts" . | indent 6 }}
++{{ include "deploy_utils.etcHosts" . | indent 6 }} # EDIT: CLOUDHARNESS
+ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
+ {{- end }}
+- nodeSelector: {{ toJson .Values.apps.jupyterhub.hub.nodeSelector }}
++ {{- with .Values.apps.jupyterhub.hub.nodeSelector }}
++ nodeSelector:
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.hub.tolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+@@ -44,7 +50,7 @@ spec:
+ - name: config
+ configMap:
+ name: {{ include "jupyterhub.hub.fullname" . }}
+- {{- /* This is needed by cloudharness libraries */}}
++ {{- /* EDIT: CLOUDHARNESS This is needed by cloudharness libraries */}}
+ - name: cloudharness-allvalues
+ configMap:
+ name: cloudharness-allvalues
+@@ -82,11 +88,13 @@ spec:
+ persistentVolumeClaim:
+ claimName: {{ include "jupyterhub.hub-pvc.fullname" . }}
+ {{- end }}
+- {{- if .Values.apps.jupyterhub.rbac.enabled }}
+- serviceAccountName: {{ include "jupyterhub.hub.fullname" . }}
++ {{- with include "jupyterhub.hub-serviceaccount.fullname" . }}
++ serviceAccountName: {{ . }}
+ {{- end }}
++ {{- with .Values.apps.jupyterhub.hub.podSecurityContext }}
+ securityContext:
+- fsGroup: {{ .Values.apps.jupyterhub.hub.fsGid }}
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.apps.jupyterhub.hub.image) }}
+ imagePullSecrets: {{ . }}
+ {{- end }}
+@@ -153,14 +161,14 @@ spec:
+ name: config
+ - mountPath: /usr/local/etc/jupyterhub/secret/
+ name: secret
+- - name: cloudharness-allvalues
++ - name: cloudharness-allvalues # EDIT: CLOUDHARNESS START
+ mountPath: /opt/cloudharness/resources/allvalues.yaml
+ subPath: allvalues.yaml
+ {{- if .Values.apps.accounts }}
+ - name: cloudharness-kc-accounts
+ mountPath: /opt/cloudharness/resources/auth
+ readOnly: true
+- {{- end }}
++ {{- end }} # EDIT: CLOUDHARNESS END
+ {{- if (include "jupyterhub.hub-existing-secret.fullname" .) }}
+ - mountPath: /usr/local/etc/jupyterhub/existing-secret/
+ name: existing-secret
+diff --git a/applications/jupyterhub/deploy/templates/hub/netpol.yaml b/applications/jupyterhub/deploy/templates/hub/netpol.yaml
+index 9a7a6bc..d9508e2 100755
+--- a/applications/jupyterhub/deploy/templates/hub/netpol.yaml
++++ b/applications/jupyterhub/deploy/templates/hub/netpol.yaml
+@@ -61,31 +61,24 @@ spec:
+
+ egress:
+ # hub --> proxy
+- - ports:
+- - port: 8001
+- to:
++ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "proxy") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++ ports:
++ - port: 8001
++
+ # hub --> singleuser-server
+- - ports:
+- - port: 8888
+- to:
++ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "singleuser-server") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++ ports:
++ - port: 8888
+
+- # hub --> Kubernetes internal DNS
+- - ports:
+- - protocol: UDP
+- port: 53
+- - protocol: TCP
+- port: 53
+-
+- {{- with .Values.apps.jupyterhub.hub.networkPolicy.egress }}
+- # hub --> depends, but the default is everything
+- {{- . | toYaml | nindent 4 }}
++ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.hub.networkPolicy)) }}
++ {{- . | nindent 4 }}
+ {{- end }}
+ {{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/hub/pdb.yaml b/applications/jupyterhub/deploy/templates/hub/pdb.yaml
+index 855609d..bb6c7b1 100755
+--- a/applications/jupyterhub/deploy/templates/hub/pdb.yaml
++++ b/applications/jupyterhub/deploy/templates/hub/pdb.yaml
+@@ -1,9 +1,5 @@
+ {{- if .Values.apps.jupyterhub.hub.pdb.enabled -}}
+-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
+ apiVersion: policy/v1
+-{{- else }}
+-apiVersion: policy/v1beta1
+-{{- end }}
+ kind: PodDisruptionBudget
+ metadata:
+ name: {{ include "jupyterhub.hub.fullname" . }}
+diff --git a/applications/jupyterhub/deploy/templates/hub/rbac.yaml b/applications/jupyterhub/deploy/templates/hub/rbac.yaml
+index 738daab..1b689af 100755
+--- a/applications/jupyterhub/deploy/templates/hub/rbac.yaml
++++ b/applications/jupyterhub/deploy/templates/hub/rbac.yaml
+@@ -1,15 +1,4 @@
+-{{- if .Values.apps.jupyterhub.rbac.enabled -}}
+-apiVersion: v1
+-kind: ServiceAccount
+-metadata:
+- name: {{ include "jupyterhub.hub.fullname" . }}
+- {{- with .Values.apps.jupyterhub.hub.serviceAccount.annotations }}
+- annotations:
+- {{- . | toYaml | nindent 4 }}
+- {{- end }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+----
++{{- if .Values.apps.jupyterhub.rbac.create -}}
+ kind: Role
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+@@ -32,7 +21,7 @@ metadata:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ subjects:
+ - kind: ServiceAccount
+- name: {{ include "jupyterhub.hub.fullname" . }}
++ name: {{ include "jupyterhub.hub-serviceaccount.fullname" . }}
+ namespace: "{{ .Release.Namespace }}"
+ roleRef:
+ kind: Role
+diff --git a/applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml b/applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml
+new file mode 100644
+index 0000000..817ed66
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/hub/serviceaccount.yaml
+@@ -0,0 +1,12 @@
++{{- if .Values.apps.jupyterhub.hub.serviceAccount.create -}}
++apiVersion: v1
++kind: ServiceAccount
++metadata:
++ name: {{ include "jupyterhub.hub-serviceaccount.fullname" . }}
++ {{- with .Values.apps.jupyterhub.hub.serviceAccount.annotations }}
++ annotations:
++ {{- . | toYaml | nindent 4 }}
++ {{- end }}
++ labels:
++ {{- include "jupyterhub.labels" . | nindent 4 }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/image-pull-secret.yaml b/applications/jupyterhub/deploy/templates/image-pull-secret.yaml
+new file mode 100644
+index 0000000..b7544db
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/image-pull-secret.yaml
+@@ -0,0 +1,15 @@
++{{- if .Values.apps.jupyterhub.imagePullSecret.create }}
++kind: Secret
++apiVersion: v1
++metadata:
++ name: {{ include "jupyterhub.image-pull-secret.fullname" . }}
++ labels:
++ {{- include "jupyterhub.labels" . | nindent 4 }}
++ annotations:
++ "helm.sh/hook": pre-install,pre-upgrade
++ "helm.sh/hook-delete-policy": before-hook-creation
++ "helm.sh/hook-weight": "-20"
++type: kubernetes.io/dockerconfigjson
++data:
++ .dockerconfigjson: {{ include "jupyterhub.dockerconfigjson" . }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl b/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
+index e16fd1a..528345c 100644
+--- a/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
++++ b/applications/jupyterhub/deploy/templates/image-puller/_helpers-daemonset.tpl
+@@ -34,6 +34,9 @@ spec:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 100%
++ {{- if typeIs "int" .Values.apps.jupyterhub.prePuller.revisionHistoryLimit }}
++ revisionHistoryLimit: {{ .Values.apps.jupyterhub.prePuller.revisionHistoryLimit }}
++ {{- end }}
+ template:
+ metadata:
+ labels:
+@@ -44,13 +47,17 @@ spec:
+ {{- end }}
+ spec:
+ {{- /*
+- continuous-image-puller pods are made evictable to save on the k8s pods
+- per node limit all k8s clusters have.
++ image-puller pods are made evictable to save on the k8s pods
++ per node limit all k8s clusters have and have a higher priority
++ than user-placeholder pods that could block an entire node.
+ */}}
+- {{- if and (not .hook) .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+- priorityClassName: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
++ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
++ priorityClassName: {{ include "jupyterhub.image-puller-priority.fullname" . }}
++ {{- end }}
++ {{- with .Values.apps.jupyterhub.singleuser.nodeSelector }}
++ nodeSelector:
++ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+- nodeSelector: {{ toJson .Values.apps.jupyterhub.singleuser.nodeSelector }}
+ {{- with concat .Values.apps.jupyterhub.scheduling.userPods.tolerations .Values.apps.jupyterhub.singleuser.extraTolerations .Values.apps.jupyterhub.prePuller.extraTolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+@@ -127,6 +134,7 @@ spec:
+ {{- /* --- Conditionally pull profileList images --- */}}
+ {{- if .Values.apps.jupyterhub.prePuller.pullProfileListImages }}
+ {{- range $k, $container := .Values.apps.jupyterhub.singleuser.profileList }}
++ {{- /* profile's kubespawner_override */}}
+ {{- if $container.kubespawner_override }}
+ {{- if $container.kubespawner_override.image }}
+ - name: image-pull-singleuser-profilelist-{{ $k }}
+@@ -145,13 +153,15 @@ spec:
+ {{- end }}
+ {{- end }}
+ {{- end }}
+- {{- end }}
+- {{- end }}
+-
+- {{- /* --- Pull extra images --- */}}
+- {{- range $k, $v := .Values.apps.jupyterhub.prePuller.extraImages }}
+- - name: image-pull-{{ $k }}
+- image: {{ $v.name }}:{{ $v.tag }}
++ {{- /* kubespawner_override in profile's profile_options */}}
++ {{- if $container.profile_options }}
++ {{- range $option, $option_spec := $container.profile_options }}
++ {{- if $option_spec.choices }}
++ {{- range $choice, $choice_spec := $option_spec.choices }}
++ {{- if $choice_spec.kubespawner_override }}
++ {{- if $choice_spec.kubespawner_override.image }}
++ - name: image-pull-profile-{{ $k }}-option-{{ $option }}-{{ $choice }}
++ image: {{ $choice_spec.kubespawner_override.image }}
+ command:
+ - /bin/sh
+ - -c
+@@ -163,13 +173,20 @@ spec:
+ {{- with $.Values.apps.jupyterhub.prePuller.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+- {{- end }}
++ {{- end }}
++ {{- end }}
++ {{- end }}
++ {{- end }}
++ {{- end }}
++ {{- end }}
++ {{- end }}
++ {{- end }}
+ {{- end }}
+
+- {{- /* --- Pull CloudHarness tasks images --- */}}
+- {{- range $k, $v := ( index .Values "task-images" ) }}
+- - name: image-pull-{{ $k | replace "-" "" }}
+- image: {{ $v }}
++ {{- /* --- Pull extra images --- */}}
++ {{- range $k, $v := .Values.apps.jupyterhub.prePuller.extraImages }}
++ - name: image-pull-{{ $k }}
++ image: {{ $v.name }}:{{ $v.tag }}
+ command:
+ - /bin/sh
+ - -c
+diff --git a/applications/jupyterhub/deploy/templates/image-puller/job.yaml b/applications/jupyterhub/deploy/templates/image-puller/job.yaml
+index bdd9f63..cc6db3e 100755
+--- a/applications/jupyterhub/deploy/templates/image-puller/job.yaml
++++ b/applications/jupyterhub/deploy/templates/image-puller/job.yaml
+@@ -28,16 +28,22 @@ spec:
+ labels:
+ {{- /* Changes here will cause the Job to restart the pods. */}}
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
++ {{- with .Values.apps.jupyterhub.prePuller.labels }}
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ {{- with .Values.apps.jupyterhub.prePuller.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ spec:
+ restartPolicy: Never
+- {{- if .Values.apps.jupyterhub.rbac.enabled }}
+- serviceAccountName: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
++ {{- with include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
++ serviceAccountName: {{ . }}
++ {{- end }}
++ {{- with .Values.apps.jupyterhub.prePuller.hook.nodeSelector }}
++ nodeSelector:
++ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+- nodeSelector: {{ toJson .Values.apps.jupyterhub.prePuller.hook.nodeSelector }}
+ {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.prePuller.hook.tolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+@@ -58,6 +64,7 @@ spec:
+ - -api-server-address=https://kubernetes.default.svc:$(KUBERNETES_SERVICE_PORT)
+ - -namespace={{ .Release.Namespace }}
+ - -daemonset={{ include "jupyterhub.hook-image-puller.fullname" . }}
++ - -pod-scheduling-wait-duration={{ .Values.apps.jupyterhub.prePuller.hook.podSchedulingWaitDuration }}
+ {{- with .Values.apps.jupyterhub.prePuller.hook.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+diff --git a/applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml b/applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml
+new file mode 100644
+index 0000000..1a3fca3
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/image-puller/priorityclass.yaml
+@@ -0,0 +1,18 @@
++{{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
++{{- if or .Values.apps.jupyterhub.prePuller.hook.enabled .Values.apps.jupyterhub.prePuller.continuous.enabled -}}
++apiVersion: scheduling.k8s.io/v1
++kind: PriorityClass
++metadata:
++ name: {{ include "jupyterhub.image-puller-priority.fullname" . }}
++ annotations:
++ meta.helm.sh/release-name: "{{ .Release.Name }}"
++ meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
++ labels:
++ {{- include "jupyterhub.labels" . | nindent 4 }}
++value: {{ .Values.apps.jupyterhub.scheduling.podPriority.imagePullerPriority }}
++globalDefault: false
++description: >-
++ Enables [hook|continuous]-image-puller pods to fit on nodes even though they
++ are clogged by user-placeholder pods, while not evicting normal user pods.
++{{- end }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml b/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
+index 95c86dd..5946896 100755
+--- a/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
++++ b/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
+@@ -1,29 +1,8 @@
+ {{- /*
+ Permissions to be used by the hook-image-awaiter job
+ */}}
+-{{- if .Values.apps.jupyterhub.rbac.enabled }}
+-{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) }}
+-{{- /*
+-This service account...
+-*/ -}}
+-apiVersion: v1
+-kind: ServiceAccount
+-metadata:
+- name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+- hub.jupyter.org/deletable: "true"
+- annotations:
+- "helm.sh/hook": pre-install,pre-upgrade
+- "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+- "helm.sh/hook-weight": "0"
+- {{- with .Values.apps.jupyterhub.prePuller.hook.serviceAccount.annotations }}
+- {{- . | toYaml | nindent 4 }}
+- {{- end }}
+----
+-{{- /*
+-... will be used by this role...
+-*/}}
++{{- if .Values.apps.jupyterhub.rbac.create -}}
++{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
+ kind: Role
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+@@ -56,7 +35,7 @@ metadata:
+ "helm.sh/hook-weight": "0"
+ subjects:
+ - kind: ServiceAccount
+- name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
++ name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
+ namespace: "{{ .Release.Namespace }}"
+ roleRef:
+ kind: Role
+diff --git a/applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml b/applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml
+new file mode 100644
+index 0000000..2e5fa72
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/image-puller/serviceaccount.yaml
+@@ -0,0 +1,21 @@
++{{- /*
++ServiceAccount for the pre-puller hook's image-awaiter-job
++*/}}
++{{- if .Values.apps.jupyterhub.prePuller.hook.serviceAccount.create -}}
++{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
++apiVersion: v1
++kind: ServiceAccount
++metadata:
++ name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
++ labels:
++ {{- include "jupyterhub.labels" . | nindent 4 }}
++ hub.jupyter.org/deletable: "true"
++ annotations:
++ "helm.sh/hook": pre-install,pre-upgrade
++ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
++ "helm.sh/hook-weight": "0"
++ {{- with .Values.apps.jupyterhub.prePuller.hook.serviceAccount.annotations }}
++ {{- . | toYaml | nindent 4 }}
++ {{- end }}
++{{- end }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt b/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt
+deleted file mode 100755
+index 08bd7bb..0000000
+--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt
++++ /dev/null
+@@ -1,9 +0,0 @@
+-# Automatic HTTPS Terminator
+-
+-This directory has Kubernetes objects for automatic Let's Encrypt Support.
+-When enabled, we create a new deployment object that has an nginx-ingress
+-and kube-lego container in it. This is responsible for requesting,
+-storing and renewing certificates as needed from Let's Encrypt.
+-
+-The only change required outside of this directory is in the `proxy-public`
+-service, which targets different hubs based on automatic HTTPS status.
+\ No newline at end of file
+diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml
+deleted file mode 100755
+index 8d71a97..0000000
+--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml
++++ /dev/null
+@@ -1,28 +0,0 @@
+-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
+-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
+-{{- if $autoHTTPS -}}
+-{{- $_ := .Values.apps.jupyterhub.proxy.https.letsencrypt.contactEmail | required "proxy.https.letsencrypt.contactEmail is a required field" -}}
+-
+-# This configmap contains Traefik configuration files to be mounted.
+-# - traefik.yaml will only be read during startup (static configuration)
+-# - dynamic.yaml will be read on change (dynamic configuration)
+-#
+-# ref: https://docs.traefik.io/getting-started/configuration-overview/
+-#
+-# The configuration files are first rendered with Helm templating to large YAML
+-# strings. Then we use the fromYAML function on these strings to get an object,
+-# that we in turn merge with user provided extra configuration.
+-#
+-kind: ConfigMap
+-apiVersion: v1
+-metadata:
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+-data:
+- traefik.yaml: |
+- {{- include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.apps.jupyterhub.proxy.traefik.extraStaticConfig | toYaml | nindent 4 }}
+- dynamic.yaml: |
+- {{- include "jupyterhub.dynamic.yaml" . | fromYaml | merge .Values.apps.jupyterhub.proxy.traefik.extraDynamicConfig | toYaml | nindent 4 }}
+-
+-{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml
+deleted file mode 100755
+index fcb062f..0000000
+--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml
++++ /dev/null
+@@ -1,141 +0,0 @@
+-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
+-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
+-{{- if $autoHTTPS -}}
+-apiVersion: apps/v1
+-kind: Deployment
+-metadata:
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+-spec:
+- replicas: 1
+- selector:
+- matchLabels:
+- {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+- template:
+- metadata:
+- labels:
+- {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+- hub.jupyter.org/network-access-proxy-http: "true"
+- {{- with .Values.apps.jupyterhub.proxy.traefik.labels }}
+- {{- . | toYaml | nindent 8 }}
+- {{- end }}
+- annotations:
+- # Only force a restart through a change to this checksum when the static
+- # configuration is changed, as the dynamic can be updated after start.
+- # Any disruptions to this deployment impacts everything, it is the
+- # entrypoint of all network traffic.
+- checksum/static-config: {{ include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.apps.jupyterhub.proxy.traefik.extraStaticConfig | toYaml | sha256sum }}
+- spec:
+- {{- if .Values.apps.jupyterhub.rbac.enabled }}
+- serviceAccountName: {{ include "jupyterhub.autohttps.fullname" . }}
+- {{- end }}
+- {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+- priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
+- {{- end }}
+- nodeSelector: {{ toJson .Values.apps.jupyterhub.proxy.traefik.nodeSelector }}
+- {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.proxy.traefik.tolerations }}
+- tolerations:
+- {{- . | toYaml | nindent 8 }}
+- {{- end }}
+- {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
+- volumes:
+- - name: certificates
+- emptyDir: {}
+- - name: traefik-config
+- configMap:
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- {{- with .Values.apps.jupyterhub.proxy.traefik.extraVolumes }}
+- {{- . | toYaml | nindent 8 }}
+- {{- end }}
+- {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.apps.jupyterhub.proxy.traefik.image) }}
+- imagePullSecrets: {{ . }}
+- {{- end }}
+- initContainers:
+- - name: load-acme
+- image: "{{ .Values.apps.jupyterhub.proxy.secretSync.image.name }}:{{ .Values.apps.jupyterhub.proxy.secretSync.image.tag }}"
+- {{- with .Values.apps.jupyterhub.proxy.secretSync.image.pullPolicy }}
+- imagePullPolicy: {{ . }}
+- {{- end }}
+- args:
+- - load
+- - {{ include "jupyterhub.proxy-public-tls.fullname" . }}
+- - acme.json
+- - /etc/acme/acme.json
+- env:
+- # We need this to get logs immediately
+- - name: PYTHONUNBUFFERED
+- value: "True"
+- {{- with .Values.apps.jupyterhub.proxy.traefik.extraEnv }}
+- {{- include "jupyterhub.extraEnv" . | nindent 12 }}
+- {{- end }}
+- volumeMounts:
+- - name: certificates
+- mountPath: /etc/acme
+- {{- with .Values.apps.jupyterhub.proxy.secretSync.containerSecurityContext }}
+- securityContext:
+- {{- . | toYaml | nindent 12 }}
+- {{- end }}
+- containers:
+- - name: traefik
+- image: "{{ .Values.apps.jupyterhub.proxy.traefik.image.name }}:{{ .Values.apps.jupyterhub.proxy.traefik.image.tag }}"
+- {{- with .Values.apps.jupyterhub.proxy.traefik.image.pullPolicy }}
+- imagePullPolicy: {{ . }}
+- {{- end }}
+- {{- with .Values.apps.jupyterhub.proxy.traefik.resources }}
+- resources:
+- {{- . | toYaml | nindent 12 }}
+- {{- end }}
+- ports:
+- - name: http
+- containerPort: 8080
+- - name: https
+- containerPort: 8443
+- {{- with .Values.apps.jupyterhub.proxy.traefik.extraPorts }}
+- {{- . | toYaml | nindent 12 }}
+- {{- end }}
+- volumeMounts:
+- - name: traefik-config
+- mountPath: /etc/traefik
+- - name: certificates
+- mountPath: /etc/acme
+- {{- with .Values.apps.jupyterhub.proxy.traefik.extraVolumeMounts }}
+- {{- . | toYaml | nindent 12 }}
+- {{- end }}
+- {{- with .Values.apps.jupyterhub.proxy.traefik.extraEnv }}
+- env:
+- {{- include "jupyterhub.extraEnv" . | nindent 12 }}
+- {{- end }}
+- {{- with .Values.apps.jupyterhub.proxy.traefik.containerSecurityContext }}
+- securityContext:
+- {{- . | toYaml | nindent 12 }}
+- {{- end }}
+- - name: secret-sync
+- image: "{{ .Values.apps.jupyterhub.proxy.secretSync.image.name }}:{{ .Values.apps.jupyterhub.proxy.secretSync.image.tag }}"
+- {{- with .Values.apps.jupyterhub.proxy.secretSync.image.pullPolicy }}
+- imagePullPolicy: {{ . }}
+- {{- end }}
+- args:
+- - watch-save
+- - --label=app={{ include "jupyterhub.appLabel" . }}
+- - --label=release={{ .Release.Name }}
+- - --label=chart={{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+- - --label=heritage=secret-sync
+- - {{ include "jupyterhub.proxy-public-tls.fullname" . }}
+- - acme.json
+- - /etc/acme/acme.json
+- env:
+- # We need this to get logs immediately
+- - name: PYTHONUNBUFFERED
+- value: "True"
+- volumeMounts:
+- - name: certificates
+- mountPath: /etc/acme
+- {{- with .Values.apps.jupyterhub.proxy.secretSync.containerSecurityContext }}
+- securityContext:
+- {{- . | toYaml | nindent 12 }}
+- {{- end }}
+- {{- with .Values.apps.jupyterhub.proxy.traefik.extraPodSpec }}
+- {{- . | toYaml | nindent 6 }}
+- {{- end }}
+-{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml
+deleted file mode 100755
+index ea43b67..0000000
+--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml
++++ /dev/null
+@@ -1,40 +0,0 @@
+-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
+-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
+-{{- if (and $autoHTTPS .Values.apps.jupyterhub.rbac.enabled) -}}
+-apiVersion: rbac.authorization.k8s.io/v1
+-kind: Role
+-metadata:
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+- {{- with .Values.apps.jupyterhub.proxy.traefik.serviceAccount.annotations }}
+- annotations:
+- {{- . | toYaml | nindent 4 }}
+- {{- end }}
+-rules:
+-- apiGroups: [""]
+- resources: ["secrets"]
+- verbs: ["get", "patch", "list", "create"]
+----
+-apiVersion: rbac.authorization.k8s.io/v1
+-kind: RoleBinding
+-metadata:
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+-subjects:
+-- kind: ServiceAccount
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- apiGroup:
+-roleRef:
+- kind: Role
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- apiGroup: rbac.authorization.k8s.io
+----
+-apiVersion: v1
+-kind: ServiceAccount
+-metadata:
+- name: {{ include "jupyterhub.autohttps.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+-{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml
+deleted file mode 100755
+index d57c135..0000000
+--- a/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml
++++ /dev/null
+@@ -1,25 +0,0 @@
+-{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts .Values.apps.jupyterhub.proxy.https.enabled) }}
+-{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
+-{{- if $autoHTTPS -}}
+-apiVersion: v1
+-kind: Service
+-metadata:
+- name: {{ include "jupyterhub.proxy-http.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+- {{- with .Values.apps.jupyterhub.proxy.service.labels }}
+- {{- . | toYaml | nindent 4 }}
+- {{- end }}
+- {{- with .Values.apps.jupyterhub.proxy.service.annotations }}
+- annotations:
+- {{- . | toYaml | nindent 4 }}
+- {{- end }}
+-spec:
+- type: ClusterIP
+- selector:
+- {{- $_ := merge (dict "componentLabel" "proxy") . }}
+- {{- include "jupyterhub.matchLabels" $_ | nindent 4 }}
+- ports:
+- - port: 8000
+- targetPort: http
+-{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/deployment.yaml b/applications/jupyterhub/deploy/templates/proxy/deployment.yaml
+index 6d63ba8..bb37b8f 100755
+--- a/applications/jupyterhub/deploy/templates/proxy/deployment.yaml
++++ b/applications/jupyterhub/deploy/templates/proxy/deployment.yaml
+@@ -7,6 +7,9 @@ metadata:
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ spec:
++ {{- if typeIs "int" .Values.apps.jupyterhub.proxy.chp.revisionHistoryLimit }}
++ revisionHistoryLimit: {{ .Values.apps.jupyterhub.proxy.chp.revisionHistoryLimit }}
++ {{- end }}
+ replicas: 1
+ selector:
+ matchLabels:
+@@ -35,7 +38,7 @@ spec:
+ # match the k8s Secret during the first upgrade following an auth_token
+ # was generated.
+ checksum/auth-token: {{ include "jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token" . | sha256sum | trunc 4 | quote }}
+- checksum/proxy-secret: {{ include (print $.Template.BasePath "/jupyterhub/hub/secret.yaml") . | sha256sum }}
++ checksum/proxy-secret: {{ include (print $.Template.BasePath "/jupyterhub/proxy/secret.yaml") . | sha256sum | quote }}
+ {{- with .Values.apps.jupyterhub.proxy.annotations }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+@@ -44,7 +47,10 @@ spec:
+ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
+ {{- end }}
+- nodeSelector: {{ toJson .Values.apps.jupyterhub.proxy.chp.nodeSelector }}
++ {{- with .Values.apps.jupyterhub.proxy.chp.nodeSelector }}
++ nodeSelector:
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.proxy.chp.tolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+@@ -135,6 +141,8 @@ spec:
+ livenessProbe:
+ initialDelaySeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.periodSeconds }}
++ timeoutSeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.timeoutSeconds }}
++ failureThreshold: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.failureThreshold }}
+ httpGet:
+ path: /_chp_healthz
+ {{- if or $manualHTTPS $manualHTTPSwithsecret }}
+@@ -149,6 +157,8 @@ spec:
+ readinessProbe:
+ initialDelaySeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.periodSeconds }}
++ timeoutSeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.timeoutSeconds }}
++ failureThreshold: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.failureThreshold }}
+ httpGet:
+ path: /_chp_healthz
+ {{- if or $manualHTTPS $manualHTTPSwithsecret }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/netpol.yaml b/applications/jupyterhub/deploy/templates/proxy/netpol.yaml
+index adc8277..88a00be 100755
+--- a/applications/jupyterhub/deploy/templates/proxy/netpol.yaml
++++ b/applications/jupyterhub/deploy/templates/proxy/netpol.yaml
+@@ -85,32 +85,24 @@ spec:
+
+ egress:
+ # proxy --> hub
+- - ports:
+- - port: 8081
+- to:
++ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "hub") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++ ports:
++ - port: 8081
+
+ # proxy --> singleuser-server
+- - ports:
+- - port: 8888
+- to:
++ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "singleuser-server") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++ ports:
++ - port: 8888
+
+- # proxy --> Kubernetes internal DNS
+- - ports:
+- - protocol: UDP
+- port: 53
+- - protocol: TCP
+- port: 53
+-
+- {{- with .Values.apps.jupyterhub.proxy.chp.networkPolicy.egress }}
+- # proxy --> depends, but the default is everything
+- {{- . | toYaml | nindent 4 }}
++ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.proxy.chp.networkPolicy)) }}
++ {{- . | nindent 4 }}
+ {{- end }}
+ {{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/pdb.yaml b/applications/jupyterhub/deploy/templates/proxy/pdb.yaml
+index 1846a3b..155895b 100755
+--- a/applications/jupyterhub/deploy/templates/proxy/pdb.yaml
++++ b/applications/jupyterhub/deploy/templates/proxy/pdb.yaml
+@@ -1,9 +1,5 @@
+ {{- if .Values.apps.jupyterhub.proxy.chp.pdb.enabled -}}
+-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
+ apiVersion: policy/v1
+-{{- else }}
+-apiVersion: policy/v1beta1
+-{{- end }}
+ kind: PodDisruptionBudget
+ metadata:
+ name: {{ include "jupyterhub.proxy.fullname" . }}
+diff --git a/applications/jupyterhub/deploy/templates/proxy/service.yaml b/applications/jupyterhub/deploy/templates/proxy/service.yaml
+index 0d9ca5b..f634ba9 100755
+--- a/applications/jupyterhub/deploy/templates/proxy/service.yaml
++++ b/applications/jupyterhub/deploy/templates/proxy/service.yaml
+@@ -35,12 +35,15 @@ metadata:
+ {{- end }}
+ spec:
+ selector:
++ # This service will target the autohttps pod if autohttps is configured, and
++ # the proxy pod if not. When autohttps is configured, the service proxy-http
++ # will be around to target the proxy pod directly.
+ {{- if $autoHTTPS }}
+- component: autohttps
++ {{- $_ := merge (dict "componentLabel" "autohttps") . -}}
++ {{- include "jupyterhub.matchLabels" $_ | nindent 4 }}
+ {{- else }}
+- component: proxy
++ {{- include "jupyterhub.matchLabels" . | nindent 4 }}
+ {{- end }}
+- release: {{ .Release.Name }}
+ ports:
+ {{- if $HTTPS }}
+ - name: https
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml b/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
+index 588cf19..1bed905 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
+@@ -4,22 +4,9 @@ kind: PriorityClass
+ metadata:
+ name: {{ include "jupyterhub.priority.fullname" . }}
+ annotations:
+- # FIXME: PriorityClasses must be added before the other resources reference
+- # them, and in the past a workaround was needed to accomplish this:
+- # to make the resource a Helm hook.
+- #
+- # To transition this resource to no longer be a Helm hook resource,
+- # we explicitly add ownership annotations/labels (in 1.0.0) which
+- # will allow a future upgrade (in 2.0.0) to remove all hook and
+- # ownership annotations/labels.
+- #
+- helm.sh/hook: pre-install,pre-upgrade
+- helm.sh/hook-delete-policy: before-hook-creation
+- helm.sh/hook-weight: "-100"
+ meta.helm.sh/release-name: "{{ .Release.Name }}"
+ meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
+ labels:
+- app.kubernetes.io/managed-by: Helm
+ {{- $_ := merge (dict "componentLabel" "default-priority") . }}
+ {{- include "jupyterhub.labels" $_ | nindent 4 }}
+ value: {{ .Values.apps.jupyterhub.scheduling.podPriority.defaultPriority }}
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
+index b1dc6c5..800ac20 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
+@@ -3,11 +3,7 @@ The cluster autoscaler should be allowed to evict and reschedule these pods if
+ it would help in order to scale down a node.
+ */}}
+ {{- if .Values.apps.jupyterhub.scheduling.userPlaceholder.enabled -}}
+-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
+ apiVersion: policy/v1
+-{{- else }}
+-apiVersion: policy/v1beta1
+-{{- end }}
+ kind: PodDisruptionBudget
+ metadata:
+ name: {{ include "jupyterhub.user-placeholder.fullname" . }}
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
+index e03497d..688e217 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
+@@ -5,22 +5,9 @@ kind: PriorityClass
+ metadata:
+ name: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
+ annotations:
+- # FIXME: PriorityClasses must be added before the other resources reference
+- # them, and in the past a workaround was needed to accomplish this:
+- # to make the resource a Helm hook.
+- #
+- # To transition this resource to no longer be a Helm hook resource,
+- # we explicitly add ownership annotations/labels (in 1.0.0) which
+- # will allow a future upgrade (in 2.0.0) to remove all hook and
+- # ownership annotations/labels.
+- #
+- helm.sh/hook: pre-install,pre-upgrade
+- helm.sh/hook-delete-policy: before-hook-creation
+- helm.sh/hook-weight: "-100"
+ meta.helm.sh/release-name: "{{ .Release.Name }}"
+ meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
+ labels:
+- app.kubernetes.io/managed-by: Helm
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ value: {{ .Values.apps.jupyterhub.scheduling.podPriority.userPlaceholderPriority }}
+ globalDefault: false
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
+index 114f626..c243bee 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
+@@ -16,6 +16,9 @@ metadata:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ spec:
+ podManagementPolicy: Parallel
++ {{- if typeIs "int" .Values.apps.jupyterhub.scheduling.userPlaceholder.revisionHistoryLimit }}
++ revisionHistoryLimit: {{ .Values.apps.jupyterhub.scheduling.userPlaceholder.revisionHistoryLimit }}
++ {{- end }}
+ replicas: {{ .Values.apps.jupyterhub.scheduling.userPlaceholder.replicas }}
+ selector:
+ matchLabels:
+@@ -23,9 +26,16 @@ spec:
+ serviceName: {{ include "jupyterhub.user-placeholder.fullname" . }}
+ template:
+ metadata:
++ {{- with .Values.apps.jupyterhub.scheduling.userPlaceholder.annotations }}
++ annotations:
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ labels:
+ {{- /* Changes here will cause the Deployment to restart the pods. */}}
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
++ {{- with .Values.apps.jupyterhub.scheduling.userPlaceholder.labels }}
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ spec:
+ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
+@@ -33,7 +43,10 @@ spec:
+ {{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled }}
+ schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
+ {{- end }}
+- nodeSelector: {{ toJson .Values.apps.jupyterhub.singleuser.nodeSelector }}
++ {{- with .Values.apps.jupyterhub.singleuser.nodeSelector }}
++ nodeSelector:
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ {{- with concat .Values.apps.jupyterhub.scheduling.userPods.tolerations .Values.apps.jupyterhub.singleuser.extraTolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
+index ef8a37f..3e83b44 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
+@@ -6,16 +6,28 @@ metadata:
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ data:
+- # ref: https://kubernetes.io/docs/reference/scheduling/config/
++ {{- /*
++ This is configuration of a k8s official kube-scheduler binary running in the
++ user-scheduler.
++
++ ref: https://kubernetes.io/docs/reference/scheduling/config/
++ ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1/
++ */}}
+ config.yaml: |
+- apiVersion: kubescheduler.config.k8s.io/v1beta1
++ apiVersion: kubescheduler.config.k8s.io/v1
+ kind: KubeSchedulerConfiguration
+ leaderElection:
+- resourceLock: endpoints
++ resourceLock: leases
+ resourceName: {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
+ resourceNamespace: "{{ .Release.Namespace }}"
+ profiles:
+ - schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
++ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.plugins }}
+ plugins:
+- {{- .Values.apps.jupyterhub.scheduling.userScheduler.plugins | toYaml | nindent 10 }}
++ {{- . | toYaml | nindent 10 }}
++ {{- end }}
++ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.pluginConfig }}
++ pluginConfig:
++ {{- . | toYaml | nindent 10 }}
++ {{- end }}
+ {{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
+index 1bcaf31..f22d0de 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
+@@ -6,6 +6,9 @@ metadata:
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ spec:
++ {{- if typeIs "int" .Values.apps.jupyterhub.scheduling.userScheduler.revisionHistoryLimit }}
++ revisionHistoryLimit: {{ .Values.apps.jupyterhub.scheduling.userScheduler.revisionHistoryLimit }}
++ {{- end }}
+ replicas: {{ .Values.apps.jupyterhub.scheduling.userScheduler.replicas }}
+ selector:
+ matchLabels:
+@@ -14,16 +17,25 @@ spec:
+ metadata:
+ labels:
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
++ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.labels }}
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ annotations:
+ checksum/config-map: {{ include (print $.Template.BasePath "/jupyterhub/scheduling/user-scheduler/configmap.yaml") . | sha256sum }}
++ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.annotations }}
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ spec:
+- {{- if .Values.apps.jupyterhub.rbac.enabled }}
+- serviceAccountName: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
++ {{ with include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
++ serviceAccountName: {{ . }}
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
+ {{- end }}
+- nodeSelector: {{ toJson .Values.apps.jupyterhub.scheduling.userScheduler.nodeSelector }}
++ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.nodeSelector }}
++ nodeSelector:
++ {{- . | toYaml | nindent 8 }}
++ {{- end }}
+ {{- with concat .Values.apps.jupyterhub.scheduling.corePods.tolerations .Values.apps.jupyterhub.scheduling.userScheduler.tolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+@@ -44,13 +56,6 @@ spec:
+ {{- end }}
+ command:
+ - /usr/local/bin/kube-scheduler
+- # NOTE: --leader-elect-... (new) and --lock-object-... (deprecated)
+- # flags are silently ignored in favor of whats defined in the
+- # passed KubeSchedulerConfiguration whenever --config is
+- # passed.
+- #
+- # ref: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/
+- #
+ # NOTE: --authentication-skip-lookup=true is used to avoid a
+ # seemingly harmless error, if we need to not skip
+ # "authentication lookup" in the future, see the linked issue.
+@@ -65,12 +70,14 @@ spec:
+ livenessProbe:
+ httpGet:
+ path: /healthz
+- port: 10251
++ scheme: HTTPS
++ port: 10259
+ initialDelaySeconds: 15
+ readinessProbe:
+ httpGet:
+ path: /healthz
+- port: 10251
++ scheme: HTTPS
++ port: 10259
+ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
+index 04f2af8..2c9c6de 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
+@@ -1,9 +1,5 @@
+ {{- if and .Values.apps.jupyterhub.scheduling.userScheduler.enabled .Values.apps.jupyterhub.scheduling.userScheduler.pdb.enabled -}}
+-{{- if .Capabilities.APIVersions.Has "policy/v1" }}
+ apiVersion: policy/v1
+-{{- else }}
+-apiVersion: policy/v1beta1
+-{{- end }}
+ kind: PodDisruptionBudget
+ metadata:
+ name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
+index 083e065..9c7fab7 100755
+--- a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
+@@ -1,16 +1,5 @@
+ {{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled -}}
+-{{- if .Values.apps.jupyterhub.rbac.enabled }}
+-apiVersion: v1
+-kind: ServiceAccount
+-metadata:
+- name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
+- labels:
+- {{- include "jupyterhub.labels" . | nindent 4 }}
+- {{- with .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.annotations }}
+- annotations:
+- {{- . | toYaml | nindent 4 }}
+- {{- end }}
+----
++{{- if .Values.apps.jupyterhub.rbac.create -}}
+ kind: ClusterRole
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+@@ -19,13 +8,23 @@ metadata:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ rules:
+ # Copied from the system:kube-scheduler ClusterRole of the k8s version
+- # matching the kube-scheduler binary we use. A modification of two resource
+- # name references from kube-scheduler to user-scheduler-lock was made.
++ # matching the kube-scheduler binary we use. A modification has been made to
++ # resourceName fields to remain relevant for how we have named our resources
++ # in this Helm chart.
+ #
+- # NOTE: These rules have been unchanged between 1.12 and 1.15, then changed in
+- # 1.16 and in 1.17, but unchanged in 1.18 and 1.19.
++ # NOTE: These rules have been:
++ # - unchanged between 1.12 and 1.15
++ # - changed in 1.16
++ # - changed in 1.17
++ # - unchanged between 1.18 and 1.20
++ # - changed in 1.21: get/list/watch permission for namespace,
++ # csidrivers, csistoragecapacities was added.
++ # - unchanged between 1.22 and 1.27
++ # - changed in 1.28: permissions to get/update lock endpoint resource
++ # removed
++ # - unchanged between 1.28 and 1.29
+ #
+- # ref: https://github.com/kubernetes/kubernetes/blob/v1.19.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L696-L829
++ # ref: https://github.com/kubernetes/kubernetes/blob/v1.29.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L721-L862
+ - apiGroups:
+ - ""
+ - events.k8s.io
+@@ -50,21 +49,6 @@ rules:
+ verbs:
+ - get
+ - update
+- - apiGroups:
+- - ""
+- resources:
+- - endpoints
+- verbs:
+- - create
+- - apiGroups:
+- - ""
+- resourceNames:
+- - {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
+- resources:
+- - endpoints
+- verbs:
+- - get
+- - update
+ - apiGroups:
+ - ""
+ resources:
+@@ -159,13 +143,37 @@ rules:
+ - get
+ - list
+ - watch
++ - apiGroups:
++ - ""
++ resources:
++ - namespaces
++ verbs:
++ - get
++ - list
++ - watch
++ - apiGroups:
++ - storage.k8s.io
++ resources:
++ - csidrivers
++ verbs:
++ - get
++ - list
++ - watch
++ - apiGroups:
++ - storage.k8s.io
++ resources:
++ - csistoragecapacities
++ verbs:
++ - get
++ - list
++ - watch
+
+ # Copied from the system:volume-scheduler ClusterRole of the k8s version
+ # matching the kube-scheduler binary we use.
+ #
+- # NOTE: These rules have not changed between 1.12 and 1.19.
++ # NOTE: These rules have not changed between 1.12 and 1.29.
+ #
+- # ref: https://github.com/kubernetes/kubernetes/blob/v1.19.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1213-L1240
++ # ref: https://github.com/kubernetes/kubernetes/blob/v1.29.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1283-L1310
+ - apiGroups:
+ - ""
+ resources:
+@@ -203,7 +211,7 @@ metadata:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ subjects:
+ - kind: ServiceAccount
+- name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
++ name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
+ namespace: "{{ .Release.Namespace }}"
+ roleRef:
+ kind: ClusterRole
+diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml
+new file mode 100644
+index 0000000..67618b0
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/serviceaccount.yaml
+@@ -0,0 +1,14 @@
++{{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled -}}
++{{- if .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.create -}}
++apiVersion: v1
++kind: ServiceAccount
++metadata:
++ name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
++ labels:
++ {{- include "jupyterhub.labels" . | nindent 4 }}
++ {{- with .Values.apps.jupyterhub.scheduling.userScheduler.serviceAccount.annotations }}
++ annotations:
++ {{- . | toYaml | nindent 4 }}
++ {{- end }}
++{{- end }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml b/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
+index 3dfb137..931a150 100755
+--- a/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
++++ b/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
+@@ -62,23 +62,38 @@ spec:
+
+ egress:
+ # singleuser-server --> hub
+- - ports:
+- - port: 8081
+- to:
++ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "hub") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++ ports:
++ - port: 8081
+
+- # singleuser-server --> Kubernetes internal DNS
+- - ports:
+- - protocol: UDP
+- port: 53
+- - protocol: TCP
+- port: 53
++ # singleuser-server --> proxy
++ # singleuser-server --> autohttps
++ #
++ # While not critical for core functionality, a user or library code may rely
++ # on communicating with the proxy or autohttps pods via a k8s Service it can
++ # detected from well known environment variables.
++ #
++ - to:
++ - podSelector:
++ matchLabels:
++ {{- $_ := merge (dict "componentLabel" "proxy") . }}
++ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++ ports:
++ - port: 8000
++ - to:
++ - podSelector:
++ matchLabels:
++ {{- $_ := merge (dict "componentLabel" "autohttps") . }}
++ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
++ ports:
++ - port: 8080
++ - port: 8443
+
+- {{- with .Values.apps.jupyterhub.singleuser.networkPolicy.egress }}
+- # singleuser-server --> depends, but the default is everything
+- {{- . | toYaml | nindent 4 }}
++ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.apps.jupyterhub.singleuser.networkPolicy)) }}
++ {{- . | nindent 4 }}
+ {{- end }}
+ {{- end }}
+diff --git a/applications/jupyterhub/deploy/templates/singleuser/secret.yaml b/applications/jupyterhub/deploy/templates/singleuser/secret.yaml
+new file mode 100644
+index 0000000..e6eab9b
+--- /dev/null
++++ b/applications/jupyterhub/deploy/templates/singleuser/secret.yaml
+@@ -0,0 +1,17 @@
++{{- if .Values.apps.jupyterhub.singleuser.extraFiles }}
++kind: Secret
++apiVersion: v1
++metadata:
++ name: {{ include "jupyterhub.singleuser.fullname" . }}
++ labels:
++ {{- include "jupyterhub.labels" . | nindent 4 }}
++type: Opaque
++{{- with include "jupyterhub.extraFiles.data" .Values.apps.jupyterhub.singleuser.extraFiles }}
++data:
++ {{- . | nindent 2 }}
++{{- end }}
++{{- with include "jupyterhub.extraFiles.stringData" .Values.apps.jupyterhub.singleuser.extraFiles }}
++stringData:
++ {{- . | nindent 2 }}
++{{- end }}
++{{- end }}
+diff --git a/applications/jupyterhub/deploy/values.schema.yaml b/applications/jupyterhub/deploy/values.schema.yaml
+new file mode 100644
+index 0000000..69c13a8
+--- /dev/null
++++ b/applications/jupyterhub/deploy/values.schema.yaml
+@@ -0,0 +1,3014 @@
++# This schema (a jsonschema in YAML format) is used to generate
++# values.schema.json which is packaged with the Helm chart for client side
++# validation by helm of values before template rendering.
++#
++# This schema is also used by our documentation system to build the
++# configuration reference section based on the description fields. See
++# docs/source/conf.py for that logic!
++#
++# We look to document everything we have default values for in values.yaml, but
++# we don't look to enforce the perfect validation logic within this file.
++#
++# ref: https://json-schema.org/learn/getting-started-step-by-step.html
++#
++$schema: http://json-schema.org/draft-07/schema#
++type: object
++additionalProperties: false
++required:
++ - imagePullSecrets
++ - hub
++ - proxy
++ - singleuser
++ - ingress
++ - prePuller
++ - custom
++ - cull
++ - debug
++ - rbac
++ - global
++properties:
++ enabled:
++ type: [boolean, "null"]
++ description: |
++ `enabled` is ignored by the jupyterhub chart itself, but a chart depending
++ on the jupyterhub chart conditionally can make use this config option as
++ the condition.
++ fullnameOverride:
++ type: [string, "null"]
++ description: |
++ fullnameOverride and nameOverride allow you to adjust how the resources
++ part of the Helm chart are named.
++
++ Name format | Resource types | fullnameOverride | nameOverride | Note
++ ------------------------- | -------------- | ---------------- | ------------ | -
++ component | namespaced | `""` | * | Default
++ release-component | cluster wide | `""` | * | Default
++ fullname-component | * | str | * | -
++ release-component | * | null | `""` | -
++ release-(name-)component | * | null | str | omitted if contained in release
++ release-(chart-)component | * | null | null | omitted if contained in release
++
++ ```{admonition} Warning!
++ :class: warning
++ Changing fullnameOverride or nameOverride after the initial installation
++ of the chart isn't supported. Changing their values likely leads to a
++ reset of non-external JupyterHub databases, abandonment of users' storage,
++ and severed couplings to currently running user pods.
++ ```
++
++ If you are a developer of a chart depending on this chart, you should
++ avoid hardcoding names. If you want to reference the name of a resource in
++ this chart from a parent helm chart's template, you can make use of the
++ global named templates instead.
++
++ ```yaml
++ # some pod definition of a parent chart helm template
++ schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
++ ```
++
++ To access them from a container, you can also rely on the hub ConfigMap
++ that contains entries of all the resource names.
++
++ ```yaml
++ # some container definition in a parent chart helm template
++ env:
++ - name: SCHEDULER_NAME
++ valueFrom:
++ configMapKeyRef:
++ name: {{ include "jupyterhub.user-scheduler.fullname" . }}
++ key: user-scheduler
++ ```
++
++ nameOverride:
++ type: [string, "null"]
++ description: |
++ See the documentation under [`fullnameOverride`](schema_fullnameOverride).
++
++ imagePullSecret:
++ type: object
++ required: [create]
++ if:
++ properties:
++ create:
++ const: true
++ then:
++ additionalProperties: false
++ required: [registry, username, password]
++ description: |
++ This is configuration to create a k8s Secret resource of `type:
++ kubernetes.io/dockerconfigjson`, with credentials to pull images from a
++ private image registry. If you opt to do so, it will be available for use
++ by all pods in their respective `spec.imagePullSecrets` alongside other
++ k8s Secrets defined in `imagePullSecrets` or the pod respective
++ `...image.pullSecrets` configuration.
++
++ In other words, using this configuration option can automate both the
++ otherwise manual creation of a k8s Secret and the otherwise manual
++ configuration to reference this k8s Secret in all the pods of the Helm
++ chart.
++
++ ```sh
++ # you won't need to create a k8s Secret manually...
++ kubectl create secret docker-registry image-pull-secret \
++ --docker-server= \
++ --docker-username= \
++ --docker-email= \
++ --docker-password=
++ ```
++
++ If you just want to let all Pods reference an existing secret, use the
++ [`imagePullSecrets`](schema_imagePullSecrets) configuration instead.
++ properties:
++ create:
++ type: boolean
++ description: |
++ Toggle the creation of the k8s Secret with provided credentials to
++ access a private image registry.
++ automaticReferenceInjection:
++ type: boolean
++ description: |
++ Toggle the automatic reference injection of the created Secret to all
++ pods' `spec.imagePullSecrets` configuration.
++ registry:
++ type: string
++ description: |
++ Name of the private registry you want to create a credential set for.
++ It will default to Docker Hub's image registry.
++
++ Examples:
++ - https://index.docker.io/v1/
++ - quay.io
++ - eu.gcr.io
++ - alexmorreale.privatereg.net
++ username:
++ type: string
++ description: |
++ Name of the user you want to use to connect to your private registry.
++
++ For external gcr.io, you will use the `_json_key`.
++
++ Examples:
++ - alexmorreale
++ - alex@pfc.com
++ - _json_key
++ password:
++ type: string
++ description: |
++ Password for the private image registry's user.
++
++ Examples:
++ - plaintextpassword
++ - abc123SECRETzyx098
++
++ For gcr.io registries the password will be a big JSON blob for a
++ Google cloud service account, it should look something like below.
++
++ ```yaml
++ password: |-
++ {
++ "type": "service_account",
++ "project_id": "jupyter-se",
++ "private_key_id": "f2ba09118a8d3123b3321bd9a7d6d0d9dc6fdb85",
++ ...
++ }
++ ```
++ email:
++ type: [string, "null"]
++ description: |
++ Specification of an email is most often not required, but it is
++ supported.
++
++ imagePullSecrets:
++ type: array
++ description: |
++ Chart wide configuration to _append_ k8s Secret references to all its
++ pod's `spec.imagePullSecrets` configuration.
++
++ This will not override or get overridden by pod specific configuration,
++ but instead augment the pod specific configuration.
++
++ You can use both the k8s native syntax, where each list element is like
++ `{"name": "my-secret-name"}`, or you can let list elements be strings
++ naming the secrets directly.
++
++ hub:
++ type: object
++ additionalProperties: false
++ required: [baseUrl]
++ properties:
++ revisionHistoryLimit: &revisionHistoryLimit
++ type: [integer, "null"]
++ minimum: 0
++ description: |
++ Configures the resource's `spec.revisionHistoryLimit`. This is
++ available for Deployment, StatefulSet, and DaemonSet resources.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#revision-history-limit)
++ for more info.
++ config:
++ type: object
++ additionalProperties: true
++ description: |
++ JupyterHub and its components (authenticators, spawners, etc), are
++ Python classes that expose its configuration through
++ [_traitlets_](https://traitlets.readthedocs.io/en/stable/). With this
++ Helm chart configuration (`hub.config`), you can directly configure
++ the Python classes through _static_ YAML values. To _dynamically_ set
++ values, you need to use [`hub.extraConfig`](schema_hub.extraConfig)
++ instead.
++
++ ```{admonition} Currently intended only for auth config
++ :class: warning
++ This config _currently_ (0.11.0) only influence the software in the
++ `hub` Pod, but some Helm chart config options such as
++ [`hub.baseUrl`](schema_hub.baseUrl) is used to set
++ `JupyterHub.base_url` in the `hub` Pod _and_ influence how other Helm
++ templates are rendered.
++
++ As we have not yet mapped out all the potential configuration
++ conflicts except for the authentication related configuration options,
++ please accept that using it for something else at this point can lead
++ to issues.
++ ```
++
++ __Example__
++
++ If you inspect documentation or some `jupyterhub_config.py` to contain
++ the following section:
++
++ ```python
++ c.JupyterHub.admin_access = true
++ c.JupyterHub.admin_users = ["jovyan1", "jovyan2"]
++ c.KubeSpawner.k8s_api_request_timeout = 10
++ c.GitHubOAuthenticator.allowed_organizations = ["jupyterhub"]
++ ```
++
++ Then, you would be able to represent it with this configuration like:
++
++ ```yaml
++ hub:
++ config:
++ JupyterHub:
++ admin_access: true
++ admin_users:
++ - jovyan1
++ - jovyan2
++ KubeSpawner:
++ k8s_api_request_timeout: 10
++ GitHubOAuthenticator:
++ allowed_organizations:
++ - jupyterhub
++ ```
++
++ ```{admonition} YAML limitations
++ :class: tip
++ You can't represent Python `Bytes` or `Set` objects in YAML directly.
++ ```
++
++ ```{admonition} Helm value merging
++ :class: tip
++ `helm` merges a Helm chart's default values with values passed with
++ the `--values` or `-f` flag. During merging, lists are replaced while
++ dictionaries are updated.
++ ```
++ extraFiles: &extraFiles
++ type: object
++ additionalProperties: false
++ description: |
++ A dictionary with extra files to be injected into the pod's container
++ on startup. This can for example be used to inject: configuration
++ files, custom user interface templates, images, and more.
++
++ ```yaml
++ # NOTE: "hub" is used in this example, but the configuration is the
++ # same for "singleuser".
++ hub:
++ extraFiles:
++ # The file key is just a reference that doesn't influence the
++ # actual file name.
++ :
++ # mountPath is required and must be the absolute file path.
++ mountPath:
++
++ # Choose one out of the three ways to represent the actual file
++ # content: data, stringData, or binaryData.
++ #
++ # data should be set to a mapping (dictionary). It will in the
++ # end be rendered to either YAML, JSON, or TOML based on the
++ # filename extension that are required to be either .yaml, .yml,
++ # .json, or .toml.
++ #
++ # If your content is YAML, JSON, or TOML, it can make sense to
++ # use data to represent it over stringData as data can be merged
++ # instead of replaced if set partially from separate Helm
++ # configuration files.
++ #
++ # Both stringData and binaryData should be set to a string
++ # representing the content, where binaryData should be the
++ # base64 encoding of the actual file content.
++ #
++ data:
++ myConfig:
++ myMap:
++ number: 123
++ string: "hi"
++ myList:
++ - 1
++ - 2
++ stringData: |
++ hello world!
++ binaryData: aGVsbG8gd29ybGQhCg==
++
++ # mode is by default 0644 and you can optionally override it
++ # either by octal notation (example: 0400) or decimal notation
++ # (example: 256).
++ mode:
++ ```
++
++ **Using --set-file**
++
++ To avoid embedding entire files in the Helm chart configuration, you
++ can use the `--set-file` flag during `helm upgrade` to set the
++ stringData or binaryData field.
++
++ ```yaml
++ hub:
++ extraFiles:
++ my_image:
++ mountPath: /usr/local/share/jupyterhub/static/my_image.png
++
++ # Files in /usr/local/etc/jupyterhub/jupyterhub_config.d are
++ # automatically loaded in alphabetical order of the final file
++ # name when JupyterHub starts.
++ my_config:
++ mountPath: /usr/local/etc/jupyterhub/jupyterhub_config.d/my_jupyterhub_config.py
++ ```
++
++ ```bash
++ # --set-file expects a text based file, so you need to base64 encode
++ # it manually first.
++ base64 my_image.png > my_image.png.b64
++
++ helm upgrade <...> \
++ --set-file hub.extraFiles.my_image.binaryData=./my_image.png.b64 \
++ --set-file hub.extraFiles.my_config.stringData=./my_jupyterhub_config.py
++ ```
++
++ **Common uses**
++
++ 1. **JupyterHub template customization**
++
++ You can replace the default JupyterHub user interface templates in
++ the hub pod by injecting new ones to
++ `/usr/local/share/jupyterhub/templates`. These can in turn
++ reference custom images injected to
++ `/usr/local/share/jupyterhub/static`.
++
++ 1. **JupyterHub standalone file config**
++
++ Instead of embedding JupyterHub python configuration as a string
++ within a YAML file through
++ [`hub.extraConfig`](schema_hub.extraConfig), you can inject a
++ standalone .py file into
++ `/usr/local/etc/jupyterhub/jupyterhub_config.d` that is
++ automatically loaded.
++
++ 1. **Flexible configuration**
++
++ By injecting files, you don't have to embed them in a docker image
++ that you have to rebuild.
++
++ If your configuration file is a YAML/JSON/TOML file, you can also
++ use `data` instead of `stringData` which allow you to set various
++ configuration in separate Helm config files. This can be useful to
++ help dependent charts override only some configuration part of the
++ file, or to allow for the configuration be set through multiple
++ Helm configuration files.
++
++ **Limitations**
++
++ 1. File size
++
++ The files in `hub.extraFiles` and `singleuser.extraFiles` are
++ respectively stored in their own k8s Secret resource. As k8s
++ Secret's are limited, typically to 1MB, you will be limited to a
++ total file size of less than 1MB as there is also base64 encoding
++ that takes place reducing available capacity to 75%.
++
++ 2. File updates
++
++ The files that are mounted are only set during container startup.
++ This is [because we use
++ `subPath`](https://kubernetes.io/docs/concepts/storage/volumes/#secret)
++ as is required to avoid replacing the content of the entire
++ directory we mount in.
++ patternProperties:
++ ".*":
++ type: object
++ additionalProperties: false
++ required: [mountPath]
++ oneOf:
++ - required: [data]
++ - required: [stringData]
++ - required: [binaryData]
++ properties:
++ mountPath:
++ type: string
++ data:
++ type: object
++ additionalProperties: true
++ stringData:
++ type: string
++ binaryData:
++ type: string
++ mode:
++ type: number
++ baseUrl:
++ type: string
++ description: |
++ This is the equivalent of c.JupyterHub.base_url, but it is also needed
++ by the Helm chart in general. So, instead of setting
++ c.JupyterHub.base_url, use this configuration.
++ command:
++ type: array
++ description: |
++ A list of strings to be used to replace the JupyterHub image's
++ `ENTRYPOINT` entry. Note that in k8s lingo, the Dockerfile's
++ `ENTRYPOINT` is called `command`. The list of strings will be expanded
++ with Helm's template function `tpl` which can render Helm template
++ logic inside curly braces (`{{... }}`).
++
++ This could be useful to wrap the invocation of JupyterHub itself in
++ some custom way.
++
++ For more details, see the [Kubernetes
++ documentation](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/).
++ args:
++ type: array
++ description: |
++ A list of strings to be used to replace the JupyterHub image's `CMD`
++ entry as well as the Helm chart's default way to start JupyterHub.
++ Note that in k8s lingo, the Dockerfile's `CMD` is called `args`. The
++ list of strings will be expanded with Helm's template function `tpl`
++ which can render Helm template logic inside curly braces (`{{... }}`).
++
++ ```{warning}
++ By replacing the entire configuration file, which is mounted to
++ `/usr/local/etc/jupyterhub/jupyterhub_config.py` by the Helm chart,
++ instead of appending to it with `hub.extraConfig`, you expose your
++ deployment for issues stemming from getting out of sync with the Helm
++ chart's config file.
++
++      These kinds of issues will be significantly harder to debug and
++      diagnose, and due to this could cause a lot of time expenditure
++ for both the community maintaining the Helm chart as well as yourself,
++ even if this wasn't the reason for the issue.
++
++      Due to this, we ask that you do your _absolute best_ to avoid replacing
++ the default provided `jupyterhub_config.py` file. It can often be
++ possible. For example, if your goal is to have a dedicated .py file
++ for more extensive additions that you can syntax highlight and such
++ and feel limited by passing code in `hub.extraConfig` which is part of
++ a YAML file, you can use [this
++ trick](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues/1580#issuecomment-707776237)
++ instead.
++ ```
++
++ ```yaml
++ hub:
++ args:
++ - "jupyterhub"
++ - "--config"
++ - "/usr/local/etc/jupyterhub/jupyterhub_config.py"
++ - "--debug"
++ - "--upgrade-db"
++ ```
++
++ For more details, see the [Kubernetes
++ documentation](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/).
++ cookieSecret:
++ type: [string, "null"]
++ description: |
++ ```{note}
++ As of version 1.0.0 this will automatically be generated and there is
++ no need to set it manually.
++
++ If you wish to reset a generated key, you can use `kubectl edit` on
++ the k8s Secret typically named `hub` and remove the
++ `hub.config.JupyterHub.cookie_secret` entry in the k8s Secret, then
++ perform a new `helm upgrade`.
++ ```
++
++ A 32-byte cryptographically secure randomly generated string used to sign values of
++ secure cookies set by the hub. If unset, jupyterhub will generate one on startup and
++ save it in the file `jupyterhub_cookie_secret` in the `/srv/jupyterhub` directory of
++ the hub container. A value set here will make JupyterHub overwrite any previous file.
++
++ You do not need to set this at all if you are using the default configuration for
++ storing databases - sqlite on a persistent volume (with `hub.db.type` set to the
++ default `sqlite-pvc`). If you are using an external database, then you must set this
++ value explicitly - or your users will keep getting logged out each time the hub pod
++ restarts.
++
++      Changing this value will cause all user logins to be invalidated. If this secret leaks,
++ *immediately* change it to something else, or user data can be compromised
++
++ ```sh
++ # to generate a value, run
++ openssl rand -hex 32
++ ```
++ image: &image-spec
++ type: object
++ additionalProperties: false
++ required: [name, tag]
++ description: |
++ Set custom image name, tag, pullPolicy, or pullSecrets for the pod.
++ properties:
++ name:
++ type: string
++ description: |
++ The name of the image, without the tag.
++
++ ```
++ # example name
++ gcr.io/my-project/my-image
++ ```
++ tag:
++ type: string
++ description: |
++ The tag of the image to pull. This is the value following `:` in
++ complete image specifications.
++
++ ```
++ # example tags
++ v1.11.1
++ zhy270a
++ ```
++ pullPolicy:
++ enum: [null, "", IfNotPresent, Always, Never]
++ description: |
++ Configures the Pod's `spec.imagePullPolicy`.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)
++ for more info.
++ pullSecrets:
++ type: array
++ description: |
++ A list of references to existing Kubernetes Secrets with
++ credentials to pull the image.
++
++ This Pod's final `imagePullSecrets` k8s specification will be a
++ combination of:
++
++ 1. This list of k8s Secrets, specific for this pod.
++ 2. The list of k8s Secrets, for use by all pods in the Helm chart,
++ declared in this Helm charts configuration called
++ `imagePullSecrets`.
++ 3. A k8s Secret, for use by all pods in the Helm chart, if
++ conditionally created from image registry credentials provided
++ under `imagePullSecret` if `imagePullSecret.create` is set to
++ true.
++
++ ```yaml
++ # example - k8s native syntax
++ pullSecrets:
++ - name: my-k8s-secret-with-image-registry-credentials
++
++ # example - simplified syntax
++ pullSecrets:
++ - my-k8s-secret-with-image-registry-credentials
++ ```
++ networkPolicy: &networkPolicy-spec
++ type: object
++ additionalProperties: false
++ description: |
++ This configuration regards the creation and configuration of a k8s
++ _NetworkPolicy resource_.
++ properties:
++ enabled:
++ type: boolean
++ description: |
++ Toggle the creation of the NetworkPolicy resource targeting this
++ pod, and by doing so, restricting its communication to only what
++ is explicitly allowed in the NetworkPolicy.
++ ingress:
++ type: array
++ description: |
++ Additional ingress rules to add besides those that are required
++ for core functionality.
++ egress:
++ type: array
++ description: |
++ Additional egress rules to add besides those that are required for
++ core functionality and those added via
++ [`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules).
++
++ ```{versionchanged} 2.0.0
++ The default value changed from providing one very permissive rule
++ allowing all egress to providing no rule. The permissive rule is
++ still provided via
++ [`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules)
++ set to true though.
++ ```
++
++ As an example, below is a configuration that disables the more
++ broadly permissive `.privateIPs` egress allow rule for the hub
++ pod, and instead provides tightly scoped permissions to access a
++ specific k8s local service as identified by pod labels.
++
++ ```yaml
++ hub:
++ networkPolicy:
++ egressAllowRules:
++ privateIPs: false
++ egress:
++ - to:
++ - podSelector:
++ matchLabels:
++ app: my-k8s-local-service
++ ports:
++ - protocol: TCP
++ port: 5978
++ ```
++ egressAllowRules:
++ type: object
++ additionalProperties: false
++ description: |
++ This is a set of predefined rules that when enabled will be added
++ to the NetworkPolicy list of egress rules.
++
++ The resulting egress rules will be a composition of:
++ - rules specific for the respective pod(s) function within the
++ Helm chart
++ - rules based on enabled `egressAllowRules` flags
++ - rules explicitly specified by the user
++
++ ```{note}
++ Each flag under this configuration will not render into a
++ dedicated rule in the NetworkPolicy resource, but instead combine
++ with the other flags to a reduced set of rules to avoid a
++ performance penalty.
++ ```
++
++ ```{versionadded} 2.0.0
++ ```
++ properties:
++ cloudMetadataServer:
++ type: boolean
++ description: |
++ Defaults to `false` for singleuser servers, but to `true` for
++ all other network policies.
++
++ When enabled this rule allows the respective pod(s) to
++ establish outbound connections to the cloud metadata server.
++
++ Note that the `nonPrivateIPs` rule is allowing all non Private
++ IP ranges but makes an exception for the cloud metadata
++ server, leaving this as the definitive configuration to allow
++ access to the cloud metadata server.
++
++ ```{versionchanged} 3.0.0
++ This configuration is not allowed to be configured true at the
++ same time as
++ [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)
++ to avoid an ambiguous configuration.
++ ```
++ dnsPortsCloudMetadataServer:
++ type: boolean
++ description: |
++ Defaults to `true` for all network policies.
++
++ When enabled this rule allows the respective pod(s) to
++ establish outbound connections to the cloud metadata server
++ via port 53.
++
++ Relying on this rule for the singleuser config should go hand
++ in hand with disabling
++ [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)
++ to avoid an ambiguous configuration.
++
++ Known situations when this rule can be relevant:
++
++ - In GKE clusters with Cloud DNS that is reached at the
++ cloud metadata server's non-private IP.
++
++ ```{note}
++ This chart doesn't know how to identify the DNS server that
++ pods will rely on due to variations between how k8s clusters
++ have been setup. Due to that, multiple rules are enabled by
++ default to ensure DNS connectivity.
++ ```
++
++ ```{versionadded} 3.0.0
++ ```
++ dnsPortsKubeSystemNamespace:
++ type: boolean
++ description: |
++ Defaults to `true` for all network policies.
++
++ When enabled this rule allows the respective pod(s) to
++ establish outbound connections to pods in the kube-system
++ namespace via port 53.
++
++ Known situations when this rule can be relevant:
++
++ - GKE, EKS, AKS, and other clusters relying directly on
++ `kube-dns` or `coredns` pods in the `kube-system` namespace.
++
++ ```{note}
++ This chart doesn't know how to identify the DNS server that
++ pods will rely on due to variations between how k8s clusters
++ have been setup. Due to that, multiple rules are enabled by
++ default to ensure DNS connectivity.
++ ```
++
++ ```{versionadded} 3.0.0
++ ```
++ dnsPortsPrivateIPs:
++ type: boolean
++ description: |
++ Defaults to `true` for all network policies.
++
++ When enabled this rule allows the respective pod(s) to
++ establish outbound connections to private IPs via port 53.
++
++ Known situations when this rule can be relevant:
++
++          - GKE clusters relying on a DNS server indirectly via a node
++ local DNS cache at an unknown private IP.
++
++ ```{note}
++ This chart doesn't know how to identify the DNS server that
++ pods will rely on due to variations between how k8s clusters
++ have been setup. Due to that, multiple rules are enabled by
++ default to ensure DNS connectivity.
++          ```
++ ```{warning}
++ This rule is not expected to work in clusters relying on
++ Cilium to enforce the NetworkPolicy rules (includes GKE
++ clusters with Dataplane v2), this is due to a [known
++ limitation](https://github.com/cilium/cilium/issues/9209).
++ ```
++ nonPrivateIPs:
++ type: boolean
++ description: |
++ Defaults to `true` for all network policies.
++
++ When enabled this rule allows the respective pod(s) to
++ establish outbound connections to the non-private IP ranges
++ with the exception of the cloud metadata server. This means
++ respective pod(s) can establish connections to the internet
++ but not (say) an unsecured prometheus server running in the
++ same cluster.
++ privateIPs:
++ type: boolean
++ description: |
++ Defaults to `false` for singleuser servers, but to `true` for
++ all other network policies.
++
++ Private IPs refer to the IP ranges `10.0.0.0/8`,
++ `172.16.0.0/12`, `192.168.0.0/16`.
++
++ When enabled this rule allows the respective pod(s) to
++ establish outbound connections to the internal k8s cluster.
++ This means users can access the internet but not (say) an
++ unsecured prometheus server running in the same cluster.
++
++ Since not all workloads in the k8s cluster may have
++ NetworkPolicies setup to restrict their incoming connections,
++ having this set to false can be a good defense against
++ malicious intent from someone in control of software in these
++ pods.
++
++ If possible, try to avoid setting this to true as it gives
++ broad permissions that could be specified more directly via
++ the [`.egress`](schema_singleuser.networkPolicy.egress).
++
++ ```{warning}
++ This rule is not expected to work in clusters relying on
++ Cilium to enforce the NetworkPolicy rules (includes GKE
++ clusters with Dataplane v2), this is due to a [known
++ limitation](https://github.com/cilium/cilium/issues/9209).
++ ```
++ interNamespaceAccessLabels:
++ enum: [accept, ignore]
++ description: |
++ This configuration option determines if both namespaces and pods
++ in other namespaces, that have specific access labels, should be
++ accepted to allow ingress (set to `accept`), or, if the labels are
++ to be ignored when applied outside the local namespace (set to
++ `ignore`).
++
++ The available access labels for respective NetworkPolicy resources
++ are:
++
++ - `hub.jupyter.org/network-access-hub: "true"` (hub)
++ - `hub.jupyter.org/network-access-proxy-http: "true"` (proxy.chp, proxy.traefik)
++ - `hub.jupyter.org/network-access-proxy-api: "true"` (proxy.chp)
++ - `hub.jupyter.org/network-access-singleuser: "true"` (singleuser)
++ allowedIngressPorts:
++ type: array
++ description: |
++ A rule to allow ingress on these ports will be added no matter
++ what the origin of the request is. The default setting for
++ `proxy.chp` and `proxy.traefik`'s networkPolicy configuration is
++ `[http, https]`, while it is `[]` for other networkPolicies.
++
++ Note that these port names or numbers target a Pod's port name or
++ number, not a k8s Service's port name or number.
++ db:
++ type: object
++ additionalProperties: false
++ properties:
++ type:
++ enum: [sqlite-pvc, sqlite-memory, mysql, postgres, other]
++ description: |
++ Type of database backend to use for the hub database.
++
++ The Hub requires a persistent database to function, and this lets you specify
++ where it should be stored.
++
++ The various options are:
++
++ 1. **sqlite-pvc**
++
++ Use an `sqlite` database kept on a persistent volume attached to the hub.
++
++ By default, this disk is created by the cloud provider using
++ *dynamic provisioning* configured by a [storage
++ class](https://kubernetes.io/docs/concepts/storage/storage-classes/).
++ You can customize how this disk is created / attached by
++ setting various properties under `hub.db.pvc`.
++
++ This is the default setting, and should work well for most cloud provider
++ deployments.
++
++ 2. **sqlite-memory**
++
++ Use an in-memory `sqlite` database. This should only be used for testing,
++ since the database is erased whenever the hub pod restarts - causing the hub
++ to lose all memory of users who had logged in before.
++
++ When using this for testing, make sure you delete all other objects that the
++ hub has created (such as user pods, user PVCs, etc) every time the hub restarts.
++ Otherwise you might run into errors about duplicate resources.
++
++ 3. **mysql**
++
++ Use an externally hosted mysql database.
++
++ You have to specify an sqlalchemy connection string for the mysql database you
++ want to connect to in `hub.db.url` if using this option.
++
++ The general format of the connection string is:
++ ```
++ mysql+pymysql://:@:/
++ ```
++
++ The user specified in the connection string must have the rights to create
++ tables in the database specified.
++
++ 4. **postgres**
++
++ Use an externally hosted postgres database.
++
++ You have to specify an sqlalchemy connection string for the postgres database you
++ want to connect to in `hub.db.url` if using this option.
++
++ The general format of the connection string is:
++ ```
++ postgresql+psycopg2://:@:/
++ ```
++
++ The user specified in the connection string must have the rights to create
++ tables in the database specified.
++
++ 5. **other**
++
++ Use an externally hosted database of some kind other than mysql
++ or postgres.
++
++ When using _other_, the database password must be passed as
++ part of [hub.db.url](schema_hub.db.url) as
++ [hub.db.password](schema_hub.db.password) will be ignored.
++ pvc:
++ type: object
++ additionalProperties: false
++ required: [storage]
++ description: |
++ Customize the Persistent Volume Claim used when `hub.db.type` is `sqlite-pvc`.
++ properties:
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: &labels-and-annotations-patternProperties
++ ".*":
++ type: string
++ description: |
++ Annotations to apply to the PVC containing the sqlite database.
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
++ for more details about annotations.
++ selector:
++ type: object
++ additionalProperties: true
++ description: |
++ Label selectors to set for the PVC containing the sqlite database.
++
++ Useful when you are using a specific PV, and want to bind to
++ that and only that.
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
++ for more details about using a label selector for what PV to
++ bind to.
++ storage:
++ type: string
++ description: |
++ Size of disk to request for the database disk.
++ accessModes:
++ type: array
++ items:
++ type: [string, "null"]
++ description: |
++ AccessModes contains the desired access modes the volume
++ should have. See [the k8s
++ documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1)
++ for more information.
++ storageClassName:
++ type: [string, "null"]
++ description: |
++ Name of the StorageClass required by the claim.
++
++ If this is a blank string it will be set to a blank string,
++ while if it is null, it will not be set at all.
++ subPath:
++ type: [string, "null"]
++ description: |
++ Path within the volume from which the container's volume
++ should be mounted. Defaults to "" (volume's root).
++ upgrade:
++ type: [boolean, "null"]
++ description: |
++ Users with external databases need to opt-in for upgrades of the
++ JupyterHub specific database schema if needed as part of a
++ JupyterHub version upgrade.
++ url:
++ type: [string, "null"]
++ description: |
++ Connection string when `hub.db.type` is mysql or postgres.
++
++ See documentation for `hub.db.type` for more details on the format of this property.
++ password:
++ type: [string, "null"]
++ description: |
++ Password for the database when `hub.db.type` is mysql or postgres.
++ labels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Extra labels to add to the hub pod.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++ to learn more about labels.
++ initContainers:
++ type: array
++ description: |
++ list of initContainers to be run with hub pod. See [Kubernetes Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
++
++ ```yaml
++ hub:
++ initContainers:
++ - name: init-myservice
++ image: busybox:1.28
++ command: ['sh', '-c', 'command1']
++ - name: init-mydb
++ image: busybox:1.28
++ command: ['sh', '-c', 'command2']
++ ```
++ extraEnv:
++ type: [object, array]
++ additionalProperties: true
++ description: |
++ Extra environment variables that should be set for the hub pod.
++
++ Environment variables are usually used to:
++ - Pass parameters to some custom code in `hub.extraConfig`.
++ - Configure code running in the hub pod, such as an authenticator or
++ spawner.
++
++ String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which
++ is a part of Kubernetes.
++
++ ```yaml
++ hub:
++ extraEnv:
++ # basic notation (for literal values only)
++ MY_ENV_VARS_NAME1: "my env var value 1"
++
++ # explicit notation (the "name" field takes precedence)
++ HUB_NAMESPACE:
++ name: HUB_NAMESPACE
++ valueFrom:
++ fieldRef:
++ fieldPath: metadata.namespace
++
++ # implicit notation (the "name" field is implied)
++ PREFIXED_HUB_NAMESPACE:
++ value: "my-prefix-$(HUB_NAMESPACE)"
++ SECRET_VALUE:
++ valueFrom:
++ secretKeyRef:
++ name: my-k8s-secret
++ key: password
++ ```
++
++ For more information, see the [Kubernetes EnvVar
++ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
++ extraConfig:
++ type: object
++ additionalProperties: true
++ description: |
++ Arbitrary extra python based configuration that should be in `jupyterhub_config.py`.
++
++ This is the *escape hatch* - if you want to configure JupyterHub to do something specific
++ that is not present here as an option, you can write the raw Python to do it here.
++
++ extraConfig is a *dict*, so there can be multiple configuration
++ snippets under different names. The configuration sections are run in
++ alphabetical order based on the keys.
++
++ Non-exhaustive examples of things you can do here:
++ - Subclass authenticator / spawner to do a custom thing
++        - Dynamically launch different images for different sets of users
++ - Inject an auth token from GitHub authenticator into user pod
++ - Anything else you can think of!
++
++ Since this is usually a multi-line string, you want to format it using YAML's
++ [| operator](https://yaml.org/spec/1.2.2/#23-scalars).
++
++ For example:
++
++ ```yaml
++ hub:
++ extraConfig:
++ myConfig.py: |
++ c.JupyterHub.something = 'something'
++ c.Spawner.something_else = 'something else'
++ ```
++
++ ```{note}
++ No code validation is performed until JupyterHub loads it! If you make
++ a typo here, it will probably manifest itself as the hub pod failing
++ to start up and instead entering an `Error` state or the subsequent
++ `CrashLoopBackoff` state.
++
++        To make use of your own program's linters etc., it would be useful to
++ not embed Python code inside a YAML file. To do that, consider using
++ [`hub.extraFiles`](schema_hub.extraFiles) and mounting a file to
++ `/usr/local/etc/jupyterhub/jupyterhub_config.d` in order to load your
++ extra configuration logic.
++ ```
++
++ fsGid:
++ type: [integer, "null"]
++ minimum: 0
++ # This schema entry is needed to help us print a more helpful error
++ # message in NOTES.txt if hub.fsGid is set.
++ #
++ description: |
++ ```{note}
++ Removed in version 2.0.0. Use
++ [`hub.podSecurityContext`](schema_hub.podSecurityContext) and specify
++ `fsGroup` instead.
++ ```
++ service:
++ type: object
++ additionalProperties: false
++ description: |
++ Object to configure the service the JupyterHub will be exposed on by the Kubernetes server.
++ properties:
++ type:
++ enum: [ClusterIP, NodePort, LoadBalancer, ExternalName]
++ description: |
++ The Kubernetes ServiceType to be used.
++
++ The default type is `ClusterIP`.
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)
++ to learn more about service types.
++ ports:
++ type: object
++ additionalProperties: false
++ description: |
++ Object to configure the ports the hub service will be deployed on.
++ properties:
++ nodePort:
++ type: [integer, "null"]
++ minimum: 0
++ description: |
++ The nodePort to deploy the hub service on.
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Kubernetes annotations to apply to the hub service.
++ extraPorts:
++ type: array
++ description: |
++ Extra ports to add to the Hub Service object besides `hub` / `8081`.
++ This should be an array that includes `name`, `port`, and `targetPort`.
++ See [Multi-port Services](https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services) for more details.
++ loadBalancerIP:
++ type: [string, "null"]
++ description: |
++ A public IP address the hub Kubernetes service should be exposed
++ on. To expose the hub directly is not recommended. Instead route
++ traffic through the proxy-public service towards the hub.
++
++ pdb: &pdb-spec
++ type: object
++ additionalProperties: false
++ description: |
++ Configure a PodDisruptionBudget for this Deployment.
++
++ These are disabled by default for our deployments that don't support
++ being run in parallel with multiple replicas. Only the user-scheduler
++ currently supports being run in parallel with multiple replicas. If
++ they are enabled for a Deployment with only one replica, they will
++ block `kubectl drain` of a node for example.
++
++ Note that if you aim to block scaling down a node with the
++ hub/proxy/autohttps pod that would cause disruptions of the
++ deployment, then you should instead annotate the pods of the
++ Deployment [as described
++ here](https://github.com/kubernetes/autoscaler/blob/HEAD/cluster-autoscaler/FAQ.md#what-types-of-pods-can-prevent-ca-from-removing-a-node).
++
++ "cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/)
++ for more details about disruptions.
++ properties:
++ enabled:
++ type: boolean
++ description: |
++ Decides if a PodDisruptionBudget is created targeting the
++ Deployment's pods.
++ maxUnavailable:
++ type: [integer, "null"]
++ description: |
++ The maximum number of pods that can be unavailable during
++ voluntary disruptions.
++ minAvailable:
++ type: [integer, "null"]
++ description: |
++ The minimum number of pods required to be available during
++ voluntary disruptions.
++ existingSecret:
++ type: [string, "null"]
++ description: |
++ This option allow you to provide the name of an existing k8s Secret to
++ use alongside of the chart managed k8s Secret. The content of this k8s
++ Secret will be merged with the chart managed k8s Secret, giving
++ priority to the self-managed k8s Secret.
++
++ ```{warning}
++ 1. The self managed k8s Secret must mirror the structure in the chart
++ managed secret.
++ 2. [`proxy.secretToken`](schema_proxy.secretToken) (aka.
++ `hub.config.ConfigurableHTTPProxy.auth_token`) is only read from
++ the chart managed k8s Secret.
++ ```
++ nodeSelector: &nodeSelector-spec
++ type: object
++ additionalProperties: true
++ description: |
++        An object with key value pairs representing labels. K8s Nodes are
++        required to match all these labels for this Pod to be scheduled on
++        them.
++
++ ```yaml
++ disktype: ssd
++ nodetype: awesome
++ ```
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector)
++ for more details.
++ tolerations: &tolerations-spec
++ type: array
++ description: |
++ Tolerations allow a pod to be scheduled on nodes with taints. These
++ tolerations are additional tolerations to the tolerations common to
++        all pods of their respective kind
++ ([scheduling.corePods.tolerations](schema_scheduling.corePods.tolerations),
++ [scheduling.userPods.tolerations](schema_scheduling.userPods.tolerations)).
++
++ Pass this field an array of
++ [`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#toleration-v1-core)
++ objects.
++
++ See the [Kubernetes
++ docs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)
++ for more info.
++ activeServerLimit:
++ type: [integer, "null"]
++ description: &jupyterhub-native-config-description |
++ JupyterHub native configuration, see the [JupyterHub
++ documentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html)
++ for more information.
++ allowNamedServers:
++ type: [boolean, "null"]
++ description: *jupyterhub-native-config-description
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ K8s annotations for the hub pod.
++ authenticatePrometheus:
++ type: [boolean, "null"]
++ description: *jupyterhub-native-config-description
++ concurrentSpawnLimit:
++ type: [integer, "null"]
++ description: *jupyterhub-native-config-description
++ consecutiveFailureLimit:
++ type: [integer, "null"]
++ description: *jupyterhub-native-config-description
++ podSecurityContext: &podSecurityContext-spec
++ additionalProperties: true
++ description: |
++ A k8s native specification of the pod's security context, see [the
++ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podsecuritycontext-v1-core)
++ for details.
++ containerSecurityContext: &containerSecurityContext-spec
++ type: object
++ additionalProperties: true
++ description: |
++ A k8s native specification of the container's security context, see [the
++ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core)
++ for details.
++ deploymentStrategy:
++ type: object
++ additionalProperties: false
++ properties:
++ rollingUpdate:
++ type: [string, "null"]
++ type:
++ type: [string, "null"]
++ description: |
++ JupyterHub does not support running in parallel, due to this we
++ default to using a deployment strategy of Recreate.
++ extraContainers: &extraContainers-spec
++ type: array
++ description: |
++ Additional containers for the Pod. Use a k8s native syntax.
++ extraVolumeMounts: &extraVolumeMounts-spec
++ type: array
++ description: |
++ Additional volume mounts for the Container. Use a k8s native syntax.
++ extraVolumes: &extraVolumes-spec
++ type: array
++ description: |
++ Additional volumes for the Pod. Use a k8s native syntax.
++ livenessProbe: &probe-spec
++ type: object
++ additionalProperties: true
++ required: [enabled]
++ if:
++ properties:
++ enabled:
++ const: true
++ then:
++ description: |
++ This config option is like the k8s native specification of a
++ container probe, except that it also supports an `enabled` boolean
++ flag.
++
++ See [the k8s
++ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#probe-v1-core)
++ for more details.
++ readinessProbe: *probe-spec
++ namedServerLimitPerUser:
++ type: [integer, "null"]
++ description: *jupyterhub-native-config-description
++ redirectToServer:
++ type: [boolean, "null"]
++ description: *jupyterhub-native-config-description
++ resources: &resources-spec
++ type: object
++ additionalProperties: true
++ description: |
++ A k8s native specification of resources, see [the
++ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core).
++ lifecycle: &lifecycle-spec
++ type: object
++ additionalProperties: false
++ description: |
++ A k8s native specification of lifecycle hooks on the container, see [the
++ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#lifecycle-v1-core).
++ properties:
++ postStart:
++ type: object
++ additionalProperties: true
++ preStop:
++ type: object
++ additionalProperties: true
++ services:
++ type: object
++ additionalProperties: true
++ description: |
++ This is where you register JupyterHub services. For details on how to
++ configure these services in this Helm chart just keep reading but for
++ details on services themselves instead read [JupyterHub's
++ documentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/service.html).
++
++ ```{note}
++ Only a selection of JupyterHub's configuration options that can be
++ configured for a service are documented below. All configuration set
++ here will be applied even if this Helm chart doesn't recognize it.
++ ```
++
++ JupyterHub's native configuration accepts a list of service objects,
++ this Helm chart only accept a dictionary where each key represents the
++ name of a service and the value is the actual service objects.
++
++ When configuring JupyterHub services via this Helm chart, the `name`
++ field can be omitted as it can be implied by the dictionary key.
++ Further, the `api_token` field can be omitted as it will be
++ automatically generated as of version 1.1.0 of this Helm chart.
++
++ If you have an external service that needs to access the automatically
++ generated api_token for the service, you can access it from the `hub`
++ k8s Secret part of this Helm chart under the key
++ `hub.services.my-service-config-key.apiToken`.
++
++ Here is an example configuration of two services where the first
++ explicitly sets a name and api_token, while the second omits those and
++ lets the name be implied from the key name and the api_token be
++ automatically generated.
++
++ ```yaml
++ hub:
++ services:
++ my-service-1:
++ admin: true
++ name: my-explicitly-set-service-name
++ api_token: my-explicitly-set-api_token
++
++ # the name of the following service will be my-service-2
++ # the api_token of the following service will be generated
++ my-service-2: {}
++ ```
++
++ If you develop a Helm chart depending on the JupyterHub Helm chart and
++ want to let some Pod's environment variable be populated with the
++ api_token of a service registered like above, then do something along
++ these lines.
++
++ ```yaml
++ # ... container specification of a pod ...
++ env:
++ - name: MY_SERVICE_1_API_TOKEN
++ valueFrom:
++ secretKeyRef:
++ # Don't hardcode the name, use the globally accessible
++ # named templates part of the JupyterHub Helm chart.
++ name: {{ include "jupyterhub.hub.fullname" . }}
++ # Note below the use of the configuration key my-service-1
++ # rather than the explicitly set service name.
++ key: hub.services.my-service-1.apiToken
++ ```
++ properties:
++ name:
++ type: string
++ description: |
++ The name can be implied via the key name under which this
++ service is configured, and is due to that allowed to be
++ omitted in this Helm chart configuration of JupyterHub.
++ admin:
++ type: boolean
++ command:
++ type: [string, array]
++ url:
++ type: string
++ api_token:
++ type: [string, "null"]
++ description: |
++ The api_token will be automatically generated if not
++            explicitly set. It will also be exposed via a k8s Secret
++ part of this Helm chart under a specific key.
++
++ See the documentation under
++ [`hub.services`](schema_hub.services) for details about this.
++ apiToken:
++ type: [string, "null"]
++ description: |
++ An alias for api_token provided for backward compatibility by
++ the JupyterHub Helm chart that will be transformed to
++ api_token.
++ loadRoles:
++ type: object
++ additionalProperties: true
++ description: |
++ This is where you should define JupyterHub roles and apply them to
++ JupyterHub users, groups, and services to grant them additional
++ permissions as defined in JupyterHub's RBAC system.
++
++ Complement this documentation with [JupyterHub's
++ documentation](https://jupyterhub.readthedocs.io/en/stable/rbac/roles.html#defining-roles)
++ about `load_roles`.
++
++ Note that while JupyterHub's native configuration `load_roles` accepts
++ a list of role objects, this Helm chart only accepts a dictionary where
++ each key represents the name of a role and the value is the actual
++ role object.
++
++ ```yaml
++ hub:
++ loadRoles:
++ teacher:
++ description: Access to users' information and group membership
++
++ # this role provides permissions to...
++ scopes: [users, groups]
++
++ # this role will be assigned to...
++ users: [erik]
++ services: [grading-service]
++ groups: [teachers]
++ ```
++
++ When configuring JupyterHub roles via this Helm chart, the `name`
++ field can be omitted as it can be implied by the dictionary key.
++ shutdownOnLogout:
++ type: [boolean, "null"]
++ description: *jupyterhub-native-config-description
++ templatePaths:
++ type: array
++ description: *jupyterhub-native-config-description
++ templateVars:
++ type: object
++ additionalProperties: true
++ description: *jupyterhub-native-config-description
++ serviceAccount: &serviceAccount
++ type: object
++ required: [create]
++ additionalProperties: false
++ description: |
++ Configuration for a k8s ServiceAccount dedicated for use by the
++ specific pod which this configuration is nested under.
++ properties:
++ create:
++ type: boolean
++ description: |
++ Whether or not to create the `ServiceAccount` resource.
++ name:
++ type: ["string", "null"]
++ description: |
++ This configuration serves multiple purposes:
++
++ - It will be the `serviceAccountName` referenced by related Pods.
++ - If `create` is set, the created ServiceAccount resource will be named like this.
++ - If [`rbac.create`](schema_rbac.create) is set, the associated (Cluster)RoleBindings will bind to this name.
++
++ If not explicitly provided, a default name will be used.
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Kubernetes annotations to apply to the k8s ServiceAccount.
++ extraPodSpec: &extraPodSpec-spec
++ type: object
++ additionalProperties: true
++ description: |
++ Arbitrary extra k8s pod specification as a YAML object. The default
++ value of this setting is an empty object, i.e. no extra configuration.
++ The value of this property is augmented to the pod specification as-is.
++
++ This is a powerful tool for expert k8s administrators with advanced
++ configuration requirements. This setting should only be used for
++ configuration that cannot be accomplished through the other settings.
++ Misusing this setting can break your deployment and/or compromise
++ your system security.
++
++ This is one of four related settings for inserting arbitrary pod
++ specification:
++
++ 1. hub.extraPodSpec
++ 2. proxy.chp.extraPodSpec
++ 3. proxy.traefik.extraPodSpec
++ 4. scheduling.userScheduler.extraPodSpec
++
++ One real-world use of these settings is to enable host networking. For
++ example, to configure host networking for the hub pod, add the
++ following to your helm configuration values:
++
++ ```yaml
++ hub:
++ extraPodSpec:
++ hostNetwork: true
++ dnsPolicy: ClusterFirstWithHostNet
++ ```
++
++ Likewise, to configure host networking for the proxy pod, add the
++ following:
++
++ ```yaml
++ proxy:
++ chp:
++ extraPodSpec:
++ hostNetwork: true
++ dnsPolicy: ClusterFirstWithHostNet
++ ```
++
++ N.B. Host networking has special security implications and can easily
++ break your deployment. This is an example—not an endorsement.
++
++ See [PodSpec](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec)
++ for the latest pod resource specification.
++
++ proxy:
++ type: object
++ additionalProperties: false
++ properties:
++ chp:
++ type: object
++ additionalProperties: false
++ description: |
++ Configure the configurable-http-proxy (chp) pod managed by jupyterhub to route traffic
++ both to itself and to user pods.
++ properties:
++ revisionHistoryLimit: *revisionHistoryLimit
++ networkPolicy: *networkPolicy-spec
++ extraCommandLineFlags:
++ type: array
++ description: |
++ A list of strings to be added as command line options when
++ starting
++ [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy#command-line-options)
++ that will be expanded with Helm's template function `tpl` which
++ can render Helm template logic inside curly braces (`{{ ... }}`).
++
++ ```yaml
++ proxy:
++ chp:
++ extraCommandLineFlags:
++ - "--auto-rewrite"
++ - "--custom-header {{ .Values.myCustomStuff }}"
++ ```
++
++ Note that these will be appended last, and if you provide the same
++ flag twice, the last flag will be used, which mean you can
++ override the default flag values as well.
++ extraEnv:
++ type: [object, array]
++ additionalProperties: true
++ description: |
++ Extra environment variables that should be set for the chp pod.
++
++ Environment variables are usually used here to:
++ - override HUB_SERVICE_PORT or HUB_SERVICE_HOST default values
++ - set CONFIGPROXY_SSL_KEY_PASSPHRASE for setting passphrase of SSL keys
++
++ String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which
++ is a part of Kubernetes.
++
++ ```yaml
++ proxy:
++ chp:
++ extraEnv:
++ # basic notation (for literal values only)
++ MY_ENV_VARS_NAME1: "my env var value 1"
++
++ # explicit notation (the "name" field takes precedence)
++ CHP_NAMESPACE:
++ name: CHP_NAMESPACE
++ valueFrom:
++ fieldRef:
++ fieldPath: metadata.namespace
++
++ # implicit notation (the "name" field is implied)
++ PREFIXED_CHP_NAMESPACE:
++ value: "my-prefix-$(CHP_NAMESPACE)"
++ SECRET_VALUE:
++ valueFrom:
++ secretKeyRef:
++ name: my-k8s-secret
++ key: password
++ ```
++
++ For more information, see the [Kubernetes EnvVar
++ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
++ pdb: *pdb-spec
++ nodeSelector: *nodeSelector-spec
++ tolerations: *tolerations-spec
++ containerSecurityContext: *containerSecurityContext-spec
++ image: *image-spec
++ livenessProbe: *probe-spec
++ readinessProbe: *probe-spec
++ resources: *resources-spec
++ defaultTarget:
++ type: [string, "null"]
++ description: |
++ Override the URL for the default routing target for the proxy.
++ Defaults to JupyterHub itself.
++ This will generally only have an effect while JupyterHub is not running,
++ as JupyterHub adds itself as the default target after it starts.
++ errorTarget:
++ type: [string, "null"]
++ description: |
++ Override the URL for the error target for the proxy.
++ Defaults to JupyterHub itself.
++ Useful to reduce load on the Hub
++ or produce more informative error messages than the Hub's default,
++ e.g. in highly customized deployments such as BinderHub.
++ See Configurable HTTP Proxy for details on implementing an error target.
++ extraPodSpec: *extraPodSpec-spec
++ secretToken:
++ type: [string, "null"]
++ description: |
++ ```{note}
++ As of version 1.0.0 this will automatically be generated and there is
++ no need to set it manually.
++
++ If you wish to reset a generated key, you can use `kubectl edit` on
++ the k8s Secret typically named `hub` and remove the
++ `hub.config.ConfigurableHTTPProxy.auth_token` entry in the k8s Secret,
++ then perform a new `helm upgrade`.
++ ```
++
++ A 32-byte cryptographically secure randomly generated string used to
++ secure communications between the hub pod and the proxy pod running a
++ [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy)
++ instance.
++
++ ```sh
++ # to generate a value, run
++ openssl rand -hex 32
++ ```
++
++ Changing this value will cause the proxy and hub pods to restart. It is good security
++ practice to rotate these values over time. If this secret leaks, *immediately* change
++ it to something else, or user data can be compromised.
++ service:
++ type: object
++ additionalProperties: false
++ description: |
++ Configuration of the k8s Service `proxy-public` which either will
++ point to the `autohttps` pod running Traefik for TLS termination, or
++ the `proxy` pod running ConfigurableHTTPProxy. Incoming traffic from
++ users on the internet should always go through this k8s Service.
++
++ When this service targets the `autohttps` pod which then routes to the
++ `proxy` pod, a k8s Service named `proxy-http` will be added targeting
++ the `proxy` pod and only accepting HTTP traffic on port 80.
++ properties:
++ type:
++ enum: [ClusterIP, NodePort, LoadBalancer, ExternalName]
++ description: |
++ Default `LoadBalancer`.
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)
++ to learn more about service types.
++ labels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Extra labels to add to the proxy service.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++ to learn more about labels.
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Annotations to apply to the service that is exposing the proxy.
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
++ for more details about annotations.
++ nodePorts:
++ type: object
++ additionalProperties: false
++ description: |
++ Object to set NodePorts to expose the service on for http and https.
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport)
++ for more details about NodePorts.
++ properties:
++ http:
++ type: [integer, "null"]
++ description: |
++ The HTTP port the proxy-public service should be exposed on.
++ https:
++ type: [integer, "null"]
++ description: |
++ The HTTPS port the proxy-public service should be exposed on.
++ disableHttpPort:
++ type: boolean
++ description: |
++ Default `false`.
++
++ If `true`, port 80 for incoming HTTP traffic will no longer be exposed. This should not be used with `proxy.https.type=letsencrypt` or `proxy.https.enabled=false` as it would remove the only exposed port.
++ extraPorts:
++ type: array
++ description: |
++ Extra ports the k8s Service should accept incoming traffic on,
++            which will be redirected to either the `autohttps` pod (traefik)
++ or the `proxy` pod (chp).
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#serviceport-v1-core)
++ for the structure of the items in this list.
++ loadBalancerIP:
++ type: [string, "null"]
++ description: |
++ The public IP address the proxy-public Kubernetes service should
++ be exposed on. This entry will end up at the configurable proxy
++ server that JupyterHub manages, which will direct traffic to user
++ pods at the `/user` path and the hub pod at the `/hub` path.
++
++ Set this if you want to use a fixed external IP address instead of
++ a dynamically acquired one. This is relevant if you have a domain
++ name that you want to point to a specific IP and want to ensure it
++ doesn't change.
++ loadBalancerSourceRanges:
++ type: array
++ description: |
++ A list of IP CIDR ranges that are allowed to access the load balancer service.
++ Defaults to allowing everyone to access it.
++ https:
++ type: object
++ additionalProperties: false
++ description: |
++ Object for customizing the settings for HTTPS used by the JupyterHub's proxy.
++ For more information on configuring HTTPS for your JupyterHub, see the [HTTPS section in our security guide](https)
++ properties:
++ enabled:
++ type: [boolean, "null"]
++ description: |
++ Indicator to set whether HTTPS should be enabled or not on the proxy. Defaults to `true` if the https object is provided.
++ type:
++ enum: [null, "", letsencrypt, manual, offload, secret]
++ description: |
++ The type of HTTPS encryption that is used.
++ Decides on which ports and network policies are used for communication via HTTPS. Setting this to `secret` sets the type to manual HTTPS with a secret that has to be provided in the `https.secret` object.
++ Defaults to `letsencrypt`.
++ letsencrypt:
++ type: object
++ additionalProperties: false
++ properties:
++ contactEmail:
++ type: [string, "null"]
++ description: |
++ The contact email to be used for automatically provisioned HTTPS certificates by Let's Encrypt. For more information see [Set up automatic HTTPS](setup-automatic-https).
++ Required for automatic HTTPS.
++ acmeServer:
++ type: [string, "null"]
++ description: |
++ Let's Encrypt is one of various ACME servers that can provide
++ a certificate, and by default their production server is used.
++
++ Let's Encrypt staging: https://acme-staging-v02.api.letsencrypt.org/directory
++            Let's Encrypt production: https://acme-v02.api.letsencrypt.org/directory
++ manual:
++ type: object
++ additionalProperties: false
++ description: |
++ Object for providing own certificates for manual HTTPS configuration. To be provided when setting `https.type` to `manual`.
++ See [Set up manual HTTPS](setup-manual-https)
++ properties:
++ key:
++ type: [string, "null"]
++ description: |
++ The RSA private key to be used for HTTPS.
++ To be provided in the form of
++
++ ```
++ key: |
++ -----BEGIN RSA PRIVATE KEY-----
++ ...
++ -----END RSA PRIVATE KEY-----
++ ```
++ cert:
++ type: [string, "null"]
++ description: |
++ The certificate to be used for HTTPS.
++ To be provided in the form of
++
++ ```
++ cert: |
++ -----BEGIN CERTIFICATE-----
++ ...
++ -----END CERTIFICATE-----
++ ```
++ secret:
++ type: object
++ additionalProperties: false
++ description: |
++ Secret to be provided when setting `https.type` to `secret`.
++ properties:
++ name:
++ type: [string, "null"]
++ description: |
++ Name of the secret
++ key:
++ type: [string, "null"]
++ description: |
++ Path to the private key to be used for HTTPS.
++ Example: `'tls.key'`
++ crt:
++ type: [string, "null"]
++ description: |
++ Path to the certificate to be used for HTTPS.
++ Example: `'tls.crt'`
++ hosts:
++ type: array
++ description: |
++ Your domain in list form.
++ Required for automatic HTTPS. See [Set up automatic HTTPS](setup-automatic-https).
++ To be provided like:
++ ```
++ hosts:
++ - <your-domain-name>
++ ```
++ traefik:
++ type: object
++ additionalProperties: false
++ description: |
++ Configure the traefik proxy used to terminate TLS when 'autohttps' is enabled
++ properties:
++ revisionHistoryLimit: *revisionHistoryLimit
++ labels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Extra labels to add to the traefik pod.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++ to learn more about labels.
++ networkPolicy: *networkPolicy-spec
++ extraInitContainers:
++ type: array
++ description: |
++ list of extraInitContainers to be run with traefik pod, after the containers set in the chart. See [Kubernetes Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
++
++ ```yaml
++ proxy:
++ traefik:
++ extraInitContainers:
++ - name: init-myservice
++ image: busybox:1.28
++ command: ['sh', '-c', 'command1']
++ - name: init-mydb
++ image: busybox:1.28
++ command: ['sh', '-c', 'command2']
++ ```
++ extraEnv:
++ type: [object, array]
++ additionalProperties: true
++ description: |
++ Extra environment variables that should be set for the traefik pod.
++
++ Environment Variables here may be used to configure traefik.
++
++ String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which
++ is a part of Kubernetes.
++
++ ```yaml
++ proxy:
++ traefik:
++ extraEnv:
++ # basic notation (for literal values only)
++ MY_ENV_VARS_NAME1: "my env var value 1"
++
++ # explicit notation (the "name" field takes precedence)
++ TRAEFIK_NAMESPACE:
++ name: TRAEFIK_NAMESPACE
++ valueFrom:
++ fieldRef:
++ fieldPath: metadata.namespace
++
++ # implicit notation (the "name" field is implied)
++ PREFIXED_TRAEFIK_NAMESPACE:
++ value: "my-prefix-$(TRAEFIK_NAMESPACE)"
++ SECRET_VALUE:
++ valueFrom:
++ secretKeyRef:
++ name: my-k8s-secret
++ key: password
++ ```
++
++ For more information, see the [Kubernetes EnvVar
++ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
++ pdb: *pdb-spec
++ nodeSelector: *nodeSelector-spec
++ tolerations: *tolerations-spec
++ containerSecurityContext: *containerSecurityContext-spec
++ extraDynamicConfig:
++ type: object
++ additionalProperties: true
++ description: |
++ This refers to traefik's post-startup configuration.
++
++ This Helm chart already provides such configuration, so this is a
++ place where you can merge in additional configuration. If you are
++ about to use this configuration, you may want to inspect the
++ default configuration declared
++ [here](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/HEAD/jupyterhub/templates/proxy/autohttps/_configmap-dynamic.yaml).
++ extraPorts:
++ type: array
++ description: |
++ Extra ports for the traefik container within the autohttps pod
++ that you would like to expose, formatted in a k8s native way.
++ extraStaticConfig:
++ type: object
++ additionalProperties: true
++ description: |
++ This refers to traefik's startup configuration.
++
++ This Helm chart already provides such configuration, so this is a
++ place where you can merge in additional configuration. If you are
++ about to use this configuration, you may want to inspect the
++ default configuration declared
++ [here](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/HEAD/jupyterhub/templates/proxy/autohttps/_configmap-traefik.yaml).
++ extraVolumes: *extraVolumes-spec
++ extraVolumeMounts: *extraVolumeMounts-spec
++ hsts:
++ type: object
++ additionalProperties: false
++ required: [includeSubdomains, maxAge, preload]
++ description: |
++ This section regards a HTTP Strict-Transport-Security (HSTS)
++ response header. It can act as a request for visiting web
++ browsers to enforce HTTPS on their end for a given time into
++ the future, and optionally also for future requests to subdomains.
++
++ These settings relate to traefik configuration which we use as a
++ TLS termination proxy.
++
++ See [Mozilla's
++ documentation](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security)
++ for more information.
++ properties:
++ includeSubdomains:
++ type: boolean
++ maxAge:
++ type: integer
++ preload:
++ type: boolean
++ image: *image-spec
++ resources: *resources-spec
++ serviceAccount: *serviceAccount
++ extraPodSpec: *extraPodSpec-spec
++ labels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ K8s labels for the proxy pod.
++
++ ```{note}
++ For consistency, this should really be located under
++ proxy.chp.labels but isn't for historical reasons.
++ ```
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ K8s annotations for the proxy pod.
++
++ ```{note}
++ For consistency, this should really be located under
++ proxy.chp.annotations but isn't for historical reasons.
++ ```
++ deploymentStrategy:
++ type: object
++ additionalProperties: false
++ properties:
++ rollingUpdate:
++ type: [string, "null"]
++ type:
++ type: [string, "null"]
++ description: |
++ While the proxy pod running
++ [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy)
++ could run in parallel, two instances running in parallel wouldn't
++ both receive updates from JupyterHub regarding how it should route
++ traffic. Due to this we default to using a deployment strategy of
++ Recreate instead of RollingUpdate.
++ secretSync:
++ type: object
++ additionalProperties: false
++ description: |
++ This configuration section refers to configuration of the sidecar
++ container in the autohttps pod running next to its traefik container
++ responsible for TLS termination.
++
++ The purpose of this container is to store away and load TLS
++ certificates from a k8s Secret. The TLS certificates are acquired by
++ the ACME client (LEGO) that is running within the traefik container,
++ where traefik is using them for TLS termination.
++ properties:
++ containerSecurityContext: *containerSecurityContext-spec
++ image: *image-spec
++ resources: *resources-spec
++
++ singleuser:
++ type: object
++ additionalProperties: false
++ description: |
++ Options for customizing the environment that is provided to the users after they log in.
++ properties:
++ networkPolicy: *networkPolicy-spec
++ podNameTemplate:
++ type: [string, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.pod_name_template](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.pod_name_template).
++ cpu:
++ type: object
++ additionalProperties: false
++ description: |
++ Set CPU limits & guarantees that are enforced for each user.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
++ for more info.
++ properties:
++ limit:
++ type: [number, "null"]
++ guarantee:
++ type: [number, "null"]
++ memory:
++ type: object
++ additionalProperties: false
++ description: |
++ Set Memory limits & guarantees that are enforced for each user.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
++ for more info.
++ properties:
++ limit:
++ type: [number, string, "null"]
++ guarantee:
++ type: [number, string, "null"]
++ description: |
++ Note that this field is referred to as *requests* by the Kubernetes API.
++ image: *image-spec
++ initContainers:
++ type: array
++ description: |
++ list of initContainers to be run every singleuser pod. See [Kubernetes Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
++
++ ```yaml
++ singleuser:
++ initContainers:
++ - name: init-myservice
++ image: busybox:1.28
++ command: ['sh', '-c', 'command1']
++ - name: init-mydb
++ image: busybox:1.28
++ command: ['sh', '-c', 'command2']
++ ```
++ profileList:
++ type: array
++ description: |
++ For more information about the profile list, see [KubeSpawner's
++ documentation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner)
++ as this is simply a passthrough to that configuration.
++
++ ```{note}
++ The image-pullers are aware of the overrides of images in
++ `singleuser.profileList` but they won't be if you configure it in
++ JupyterHub's configuration of `c.KubeSpawner.profile_list`.
++ ```
++
++ ```yaml
++ singleuser:
++ profileList:
++ - display_name: "Default: Shared, 8 CPU cores"
++ description: "Your code will run on a shared machine with CPU only."
++ default: True
++ - display_name: "Personal, 4 CPU cores & 26GB RAM, 1 NVIDIA Tesla K80 GPU"
++ description: "Your code will run a personal machine with a GPU."
++ kubespawner_override:
++ extra_resource_limits:
++ nvidia.com/gpu: "1"
++ ```
++ extraFiles: *extraFiles
++ extraEnv:
++ type: [object, array]
++ additionalProperties: true
++ description: |
++ Extra environment variables that should be set for the user pods.
++
++ String literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which
++ is a part of Kubernetes. Note that the user pods will already have
++ access to a set of environment variables that you can use, like
++ `JUPYTERHUB_USER` and `JUPYTERHUB_HOST`. For more information about these
++ inspect [this source
++ code](https://github.com/jupyterhub/jupyterhub/blob/cc8e7806530466dce8968567d1bbd2b39a7afa26/jupyterhub/spawner.py#L763).
++
++ ```yaml
++ singleuser:
++ extraEnv:
++ # basic notation (for literal values only)
++ MY_ENV_VARS_NAME1: "my env var value 1"
++
++ # explicit notation (the "name" field takes precedence)
++ USER_NAMESPACE:
++ name: USER_NAMESPACE
++ valueFrom:
++ fieldRef:
++ fieldPath: metadata.namespace
++
++ # implicit notation (the "name" field is implied)
++ PREFIXED_USER_NAMESPACE:
++ value: "my-prefix-$(USER_NAMESPACE)"
++ SECRET_VALUE:
++ valueFrom:
++ secretKeyRef:
++ name: my-k8s-secret
++ key: password
++ ```
++
++ For more information, see the [Kubernetes EnvVar
++ specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).
++ nodeSelector: *nodeSelector-spec
++ extraTolerations: *tolerations-spec
++ extraNodeAffinity:
++ type: object
++ additionalProperties: false
++ description: |
++ Affinities describe where pods prefer or require to be scheduled, they
++ may prefer or require a node where they are to be scheduled to have a
++ certain label (node affinity). They may also require to be scheduled
++ in proximity or with a lack of proximity to another pod (pod affinity
++ and anti pod affinity).
++
++ See the [Kubernetes
++ docs](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)
++ for more info.
++ properties:
++ required:
++ type: array
++ description: |
++ Pass this field an array of
++ [`NodeSelectorTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#nodeselectorterm-v1-core)
++ objects.
++ preferred:
++ type: array
++ description: |
++ Pass this field an array of
++ [`PreferredSchedulingTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#preferredschedulingterm-v1-core)
++ objects.
++ extraPodAffinity:
++ type: object
++ additionalProperties: false
++ description: |
++ See the description of `singleuser.extraNodeAffinity`.
++ properties:
++ required:
++ type: array
++ description: |
++ Pass this field an array of
++ [`PodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podaffinityterm-v1-core)
++ objects.
++ preferred:
++ type: array
++ description: |
++ Pass this field an array of
++ [`WeightedPodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#weightedpodaffinityterm-v1-core)
++ objects.
++ extraPodAntiAffinity:
++ type: object
++ additionalProperties: false
++ description: |
++ See the description of `singleuser.extraNodeAffinity`.
++ properties:
++ required:
++ type: array
++ description: |
++ Pass this field an array of
++ [`PodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podaffinityterm-v1-core)
++ objects.
++ preferred:
++ type: array
++ description: |
++ Pass this field an array of
++ [`WeightedPodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#weightedpodaffinityterm-v1-core)
++ objects.
++ cloudMetadata:
++ type: object
++ additionalProperties: false
++ required: [blockWithIptables, ip]
++ description: |
++ Please refer to dedicated section in [the Helm chart
++ documentation](block-metadata-iptables) for more information about
++ this.
++ properties:
++ blockWithIptables:
++ type: boolean
++ ip:
++ type: string
++
++ cmd:
++ type: [array, string, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.cmd](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.cmd).
++ The default is "jupyterhub-singleuser".
++ Use `cmd: null` to launch a custom CMD from the image,
++ which must launch jupyterhub-singleuser or an equivalent process eventually.
++ For example: Jupyter's docker-stacks images.
++ defaultUrl:
++ type: [string, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.default_url](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.default_url).
++ # FIXME: name mismatch, named events_enabled in kubespawner
++ events:
++ type: [boolean, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.events_enabled](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.events_enabled).
++ extraAnnotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.extra_annotations](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_annotations).
++ extraContainers:
++ type: array
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.extra_containers](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_containers).
++ extraLabels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.extra_labels](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_labels).
++ extraPodConfig:
++ type: object
++ additionalProperties: true
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.extra_pod_config](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_pod_config).
++ extraResource:
++ type: object
++ additionalProperties: false
++ properties:
++ # FIXME: name mismatch, named extra_resource_guarantees in kubespawner
++ guarantees:
++ type: object
++ additionalProperties: true
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.extra_resource_guarantees](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_resource_guarantees).
++ # FIXME: name mismatch, named extra_resource_limits in kubespawner
++ limits:
++ type: object
++ additionalProperties: true
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.extra_resource_limits](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_resource_limits).
++ fsGid:
++ type: [integer, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.fs_gid](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.fs_gid).
++ lifecycleHooks:
++ type: object
++ additionalProperties: false
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.lifecycle_hooks](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.lifecycle_hooks).
++ properties:
++ postStart:
++ type: object
++ additionalProperties: true
++ preStop:
++ type: object
++ additionalProperties: true
++ networkTools:
++ type: object
++ additionalProperties: false
++ description: |
++ This configuration section refers to configuration of a conditionally
++ created initContainer for the user pods with a purpose to block a
++ specific IP address.
++
++ This initContainer will be created if
++ [`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)
++ is set to true.
++ properties:
++ image: *image-spec
++ resources: *resources-spec
++ # FIXME: name mismatch, named service_account in kubespawner
++ serviceAccountName:
++ type: [string, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.service_account](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.service_account).
++ startTimeout:
++ type: [integer, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.start_timeout](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.start_timeout).
++ storage:
++ type: object
++ additionalProperties: false
++ required: [type, homeMountPath]
++ description: |
++ This section configures KubeSpawner directly to some extent but also
++ indirectly through Helm chart specific configuration options such as
++ [`singleuser.storage.type`](schema_singleuser.storage.type).
++ properties:
++ capacity:
++ type: [string, "null"]
++ description: |
++ Configures `KubeSpawner.storage_capacity`.
++
++ See the [KubeSpawner
++ documentation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html)
++ for more information.
++ dynamic:
++ type: object
++ additionalProperties: false
++ properties:
++ pvcNameTemplate:
++ type: [string, "null"]
++ description: |
++ Configures `KubeSpawner.pvc_name_template` which will be the
++ resource name of the PVC created by KubeSpawner for each user
++ if needed.
++ storageAccessModes:
++ type: array
++ items:
++ type: [string, "null"]
++ description: |
++ Configures `KubeSpawner.storage_access_modes`.
++
++ See KubeSpawners documentation and [the k8s
++ documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes)
++ for more information.
++ storageClass:
++ type: [string, "null"]
++ description: |
++ Configures `KubeSpawner.storage_class`, which can be an
++ explicit StorageClass to dynamically provision storage for the
++ PVC that KubeSpawner will create.
++
++ There is a default StorageClass available in k8s clusters
++ for use if this is unspecified.
++ volumeNameTemplate:
++ type: [string, "null"]
++ description: |
++ Configures `KubeSpawner.volume_name_template`, which is the
++ name to reference from the containers volumeMounts section.
++ extraLabels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Configures `KubeSpawner.storage_extra_labels`. Note that these
++ labels are set on the PVC during creation only and won't be
++ updated after creation.
++ extraVolumeMounts: *extraVolumeMounts-spec
++ extraVolumes: *extraVolumes-spec
++ homeMountPath:
++ type: string
++ description: |
++ The location within the container where the home folder storage
++ should be mounted.
++ static:
++ type: object
++ additionalProperties: false
++ properties:
++ pvcName:
++ type: [string, "null"]
++ description: |
++ Configures `KubeSpawner.pvc_claim_name` to reference
++ pre-existing storage.
++ subPath:
++ type: [string, "null"]
++ description: |
++ Configures the `subPath` field of a
++ `KubeSpawner.volume_mounts` entry added by the Helm chart.
++
++ Path within the volume from which the container's volume
++ should be mounted.
++ type:
++ enum: [dynamic, static, none]
++ description: |
++ Decide if you want storage to be provisioned dynamically
++ (dynamic), or if you want to attach existing storage (static), or
++ don't want any storage to be attached (none).
++ allowPrivilegeEscalation:
++ type: [boolean, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.allow_privilege_escalation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.allow_privilege_escalation).
++ uid:
++ type: [integer, "null"]
++ description: |
++ Passthrough configuration for
++ [KubeSpawner.uid](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.uid).
++
++ This dictates as what user the main container will start up as.
++
++ As an example of when this is needed, consider if you want to enable
++ sudo rights for some of your users. This can be done by starting up as
++ root, enabling it from the container in a startup script, and then
++ transitioning to the normal user.
++
++ Default is 1000, set to null to use the container's default.
++
++ scheduling:
++ type: object
++ additionalProperties: false
++ description: |
++ Objects for customizing the scheduling of various pods on the nodes and
++ related labels.
++ properties:
++ userScheduler:
++ type: object
++ additionalProperties: false
++ required: [enabled, plugins, pluginConfig, logLevel]
++ description: |
++ The user scheduler is making sure that user pods are scheduled
++ tight on nodes, this is useful for autoscaling of user node pools.
++ properties:
++ enabled:
++ type: boolean
++ description: |
++ Enables the user scheduler.
++ revisionHistoryLimit: *revisionHistoryLimit
++ replicas:
++ type: integer
++ description: |
++ You can have multiple schedulers to share the workload or improve
++ availability on node failure.
++ image: *image-spec
++ pdb: *pdb-spec
++ nodeSelector: *nodeSelector-spec
++ tolerations: *tolerations-spec
++ labels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Extra labels to add to the userScheduler pods.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++ to learn more about labels.
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Extra annotations to add to the user-scheduler pods.
++ containerSecurityContext: *containerSecurityContext-spec
++ logLevel:
++ type: integer
++ description: |
++ Corresponds to the verbosity level of logging made by the
++ kube-scheduler binary running within the user-scheduler pod.
++ plugins:
++ type: object
++ additionalProperties: true
++ description: |
++ These plugins refers to kube-scheduler plugins as documented
++ [here](https://kubernetes.io/docs/reference/scheduling/config/).
++
++ The user-scheduler is really just a kube-scheduler configured in a
++ way to pack users tight on nodes using these plugins. See
++ values.yaml for information about the default plugins.
++ pluginConfig:
++ type: array
++ description: |
++ Individually activated plugins can be configured further.
++ resources: *resources-spec
++ serviceAccount: *serviceAccount
++ extraPodSpec: *extraPodSpec-spec
++ podPriority:
++ type: object
++ additionalProperties: false
++ description: |
++ Pod Priority is used to allow real users evict user placeholder pods
++ that in turn by entering a Pending state can trigger a scale up by a
++ cluster autoscaler.
++
++ Having this option enabled only makes sense if the following conditions
++ are met:
++
++ 1. A cluster autoscaler is installed.
++ 2. user-placeholder pods are configured to have a priority equal or
++ higher than the cluster autoscaler's "priority cutoff" so that the
++ cluster autoscaler scales up a node in advance for a pending user
++ placeholder pod.
++ 3. Normal user pods have a higher priority than the user-placeholder
++ pods.
++ 4. Image puller pods have a priority between normal user pods and
++ user-placeholder pods.
++
++ Note that if the default priority cutoff is not configured on cluster
++ autoscaler, it will currently default to 0, and that in the future
++ this is meant to be lowered. If your cloud provider is installing the
++ cluster autoscaler for you, they may also configure this specifically.
++
++ Recommended settings for a cluster autoscaler...
++
++ ... with a priority cutoff of -10 (GKE):
++
++ ```yaml
++ podPriority:
++ enabled: true
++ globalDefault: false
++ defaultPriority: 0
++ imagePullerPriority: -5
++ userPlaceholderPriority: -10
++ ```
++
++ ... with a priority cutoff of 0:
++
++ ```yaml
++ podPriority:
++ enabled: true
++ globalDefault: true
++ defaultPriority: 10
++ imagePullerPriority: 5
++ userPlaceholderPriority: 0
++ ```
++ properties:
++ enabled:
++ type: boolean
++ globalDefault:
++ type: boolean
++ description: |
++ Warning! This will influence all pods in the cluster.
++
++ The priority a pod usually get is 0. But this can be overridden
++ with a PriorityClass resource if it is declared to be the global
++ default. This configuration option allows for the creation of such
++ global default.
++ defaultPriority:
++ type: integer
++ description: |
++ The actual value for the default pod priority.
++ imagePullerPriority:
++ type: integer
++ description: |
++ The actual value for the [hook|continuous]-image-puller pods' priority.
++ userPlaceholderPriority:
++ type: integer
++ description: |
++ The actual value for the user-placeholder pods' priority.
++ userPlaceholder:
++ type: object
++ additionalProperties: false
++ description: |
++ User placeholders simulate users but will thanks to PodPriority be
++ evicted by the cluster autoscaler if a real user shows up. In this way
++ placeholders allow you to create a headroom for the real users and
++ reduce the risk of a user having to wait for a node to be added. Be
++ sure to use the continuous image puller as well along with
++ placeholders, so the images are also available when real users arrive.
++
++ To test your setup efficiently, you can adjust the amount of user
++ placeholders with the following command:
++ ```sh
++ # Configure to have 3 user placeholders
++ kubectl scale sts/user-placeholder --replicas=3
++ ```
++ properties:
++ enabled:
++ type: boolean
++ image: *image-spec
++ revisionHistoryLimit: *revisionHistoryLimit
++ replicas:
++ type: integer
++ description: |
++ How many placeholder pods would you like to have?
++ labels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Extra labels to add to the userPlaceholder pods.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++ to learn more about labels.
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Extra annotations to add to the placeholder pods.
++ resources:
++ type: object
++ additionalProperties: true
++ description: |
++ Unless specified here, the placeholder pods will request the same
++ resources specified for the real singleuser pods.
++ containerSecurityContext: *containerSecurityContext-spec
++ corePods:
++ type: object
++ additionalProperties: false
++ description: |
++ These settings influence all pods considered core pods, namely:
++
++ - hub
++ - proxy
++ - autohttps
++ - hook-image-awaiter
++ - user-scheduler
++
++ By default, the tolerations are:
++
++ - hub.jupyter.org/dedicated=core:NoSchedule
++ - hub.jupyter.org_dedicated=core:NoSchedule
++
++ Note that tolerations set here are combined with the respective
++ components dedicated tolerations, and that `_` is available in case
++ `/` isn't allowed in the clouds tolerations.
++ properties:
++ tolerations: *tolerations-spec
++ nodeAffinity:
++ type: object
++ additionalProperties: false
++ description: |
++ Where should pods be scheduled? Perhaps on nodes with a certain
++ label is preferred or even required?
++ properties:
++ matchNodePurpose:
++ enum: [ignore, prefer, require]
++ description: |
++ Decide if core pods *ignore*, *prefer* or *require* to
++ schedule on nodes with this label:
++ ```
++ hub.jupyter.org/node-purpose=core
++ ```
++ userPods:
++ type: object
++ additionalProperties: false
++ description: |
++ These settings influence all pods considered user pods, namely:
++
++ - user-placeholder
++ - hook-image-puller
++ - continuous-image-puller
++ - jupyter-<username>
++
++ By default, the tolerations are:
++
++ - hub.jupyter.org/dedicated=user:NoSchedule
++ - hub.jupyter.org_dedicated=user:NoSchedule
++
++ Note that tolerations set here are combined with the respective
++ components dedicated tolerations, and that `_` is available in case
++ `/` isn't allowed in the clouds tolerations.
++ properties:
++ tolerations: *tolerations-spec
++ nodeAffinity:
++ type: object
++ additionalProperties: false
++ description: |
++ Where should pods be scheduled? Perhaps on nodes with a certain
++ label is preferred or even required?
++ properties:
++ matchNodePurpose:
++ enum: [ignore, prefer, require]
++ description: |
++ Decide if user pods *ignore*, *prefer* or *require* to
++ schedule on nodes with this label:
++ ```
++ hub.jupyter.org/node-purpose=user
++ ```
++
++ ingress:
++ type: object
++ additionalProperties: false
++ required: [enabled]
++ properties:
++ enabled:
++ type: boolean
++ description: |
++ Enable the creation of a Kubernetes Ingress to proxy-public service.
++
++ See [Advanced Topics — Zero to JupyterHub with Kubernetes
++ 0.7.0 documentation](ingress)
++ for more details.
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Annotations to apply to the Ingress resource.
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
++ for more details about annotations.
++ ingressClassName:
++ type: [string, "null"]
++ description: |
++ Maps directly to the Ingress resource's `spec.ingressClassName`.
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class)
++ for more details.
++ hosts:
++ type: array
++ description: |
++ List of hosts to route requests to the proxy.
++ pathSuffix:
++ type: [string, "null"]
++ description: |
++ Suffix added to Ingress's routing path pattern.
++
++ Specify `*` if your ingress matches path by glob pattern.
++ pathType:
++ enum: [Prefix, Exact, ImplementationSpecific]
++ description: |
++ The path type to use. The default value is 'Prefix'.
++
++ See [the Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types)
++ for more details about path types.
++ tls:
++ type: array
++ description: |
++ TLS configurations for Ingress.
++
++ See [the Kubernetes
++ documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls)
++ for more details about TLS.
++
++ prePuller:
++ type: object
++ additionalProperties: false
++ required: [hook, continuous]
++ properties:
++ revisionHistoryLimit: *revisionHistoryLimit
++ labels:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Extra labels to add to the pre puller job pods.
++
++ See the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
++ to learn more about labels.
++ annotations:
++ type: object
++ additionalProperties: false
++ patternProperties: *labels-and-annotations-patternProperties
++ description: |
++ Annotations to apply to the hook and continuous image puller pods. One example use case is to
++ disable istio sidecars which could interfere with the image pulling.
++ resources:
++ type: object
++ additionalProperties: true
++ description: |
++ These are standard Kubernetes resources with requests and limits for
++ cpu and memory. They will be used on the containers in the pods
++ pulling images. These should be set extremely low as the containers
++ shut down directly or are pause containers that just idle.
++
++ They were made configurable as usage of ResourceQuota may require
++ containers in the namespace to have explicit resources set.
++ extraTolerations: *tolerations-spec
++ hook:
++ type: object
++ additionalProperties: false
++ required: [enabled]
++ description: |
++ See the [*optimization
++ section*](pulling-images-before-users-arrive)
++ for more details.
++ properties:
++ enabled:
++ type: boolean
++ pullOnlyOnChanges:
++ type: boolean
++ description: |
++ Pull only if changes have been made to the images to pull, or more
++ accurately if the hook-image-puller daemonset has changed in any
++ way.
++ podSchedulingWaitDuration:
++ description: |
++ The `hook-image-awaiter` has a criteria to await all the
++ `hook-image-puller` DaemonSet's pods to both schedule and finish
++ their image pulling. This flag can be used to relax this criteria
++ to instead only await the pods that _have already scheduled_ to
++ finish image pulling after a certain duration.
++
++ The value of this is that sometimes the newly created
++ `hook-image-puller` pods cannot be scheduled because nodes are
++ full, and then it probably won't make sense to block a `helm
++ upgrade`.
++
++ An infinite duration to wait for pods to schedule can be
++ represented by `-1`. This was the default behavior of version
++ 0.9.0 and earlier.
++ type: integer
++ nodeSelector: *nodeSelector-spec
++ tolerations: *tolerations-spec
++ containerSecurityContext: *containerSecurityContext-spec
++ image: *image-spec
++ resources: *resources-spec
++ serviceAccount: *serviceAccount
++ continuous:
++ type: object
++ additionalProperties: false
++ required: [enabled]
++ description: |
++ See the [*optimization
++ section*](pulling-images-before-users-arrive)
++ for more details.
++
++ ```{note}
++ If used with a Cluster Autoscaler (an autoscaling node pool), also add
++ user-placeholders and enable pod priority.
++ ```
++ properties:
++ enabled:
++ type: boolean
++ pullProfileListImages:
++ type: boolean
++ description: |
++ The singleuser.profileList configuration can provide a selection of
++ images. This option determines if all images identified there should
++ be pulled, both by the hook and continuous pullers.
++
++ Images are looked for under `kubespawner_override`, and also
++ `profile_options.choices.kubespawner_override` since version 3.2.0.
++
++ The reason to disable this, is that if you have for example 10 images
++ which start pulling in order from 1 to 10, a user that arrives and
++ wants to start a pod with image number 10 will need to wait for all
++ images to be pulled, and then it may be preferable to just let the
++ user arriving wait for a single image to be pulled on arrival.
++ extraImages:
++ type: object
++ additionalProperties: false
++ description: |
++ See the [*optimization section*](images-that-will-be-pulled) for more
++ details.
++
++ ```yaml
++ prePuller:
++ extraImages:
++ my-extra-image-i-want-pulled:
++ name: jupyter/all-spark-notebook
++ tag: 2343e33dec46
++ ```
++ patternProperties:
++ ".*":
++ type: object
++ additionalProperties: false
++ required: [name, tag]
++ properties:
++ name:
++ type: string
++ tag:
++ type: string
++ containerSecurityContext: *containerSecurityContext-spec
++ pause:
++ type: object
++ additionalProperties: false
++ description: |
++ The image-puller pods rely on initContainer to pull all images, and
++ their actual container when they are done is just running a `pause`
++ container. These are settings for that pause container.
++ properties:
++ containerSecurityContext: *containerSecurityContext-spec
++ image: *image-spec
++
++ custom:
++ type: object
++ additionalProperties: true
++ description: |
++ Additional values to pass to the Hub.
++ JupyterHub will not itself look at these,
++ but you can read values in your own custom config via `hub.extraConfig`.
++ For example:
++
++ ```yaml
++ custom:
++ myHost: "https://example.horse"
++ hub:
++ extraConfig:
++ myConfig.py: |
++ c.MyAuthenticator.host = get_config("custom.myHost")
++ ```
++
++ cull:
++ type: object
++ additionalProperties: false
++ required: [enabled]
++ description: |
++ The
++ [jupyterhub-idle-culler](https://github.com/jupyterhub/jupyterhub-idle-culler)
++ can run as a JupyterHub managed service to _cull_ running servers.
++ properties:
++ enabled:
++ type: boolean
++ description: |
++ Enable/disable use of jupyterhub-idle-culler.
++ users:
++ type: [boolean, "null"]
++ description: See the `--cull-users` flag.
++ adminUsers:
++ type: [boolean, "null"]
++ description: See the `--cull-admin-users` flag.
++ removeNamedServers:
++ type: [boolean, "null"]
++ description: See the `--remove-named-servers` flag.
++ timeout:
++ type: [integer, "null"]
++ description: See the `--timeout` flag.
++ every:
++ type: [integer, "null"]
++ description: See the `--cull-every` flag.
++ concurrency:
++ type: [integer, "null"]
++ description: See the `--concurrency` flag.
++ maxAge:
++ type: [integer, "null"]
++ description: See the `--max-age` flag.
++
++ debug:
++ type: object
++ additionalProperties: false
++ required: [enabled]
++ properties:
++ enabled:
++ type: boolean
++ description: |
++ Increases the loglevel throughout the resources in the Helm chart.
++
++ rbac:
++ type: object
++ additionalProperties: false
++ required: [create]
++ properties:
++ enabled:
++ type: boolean
++ # This schema entry is needed to help us print a more helpful error
++ # message in NOTES.txt if hub.fsGid is set.
++ #
++ description: |
++ ````{note}
++ Removed in version 2.0.0. If you have been using `rbac.enable=false`
++ (strongly discouraged), then the equivalent configuration would be:
++
++ ```yaml
++ rbac:
++ create: false
++ hub:
++ serviceAccount:
++ create: false
++ proxy:
++ traefik:
++ serviceAccount:
++ create: false
++ scheduling:
++ userScheduler:
++ serviceAccount:
++ create: false
++ prePuller:
++ hook:
++ serviceAccount:
++ create: false
++ ```
++ ````
++ create:
++ type: boolean
++ description: |
++ Decides if (Cluster)Role and (Cluster)RoleBinding resources are
++ created and bound to the configured serviceAccounts.
++
++ global:
++ type: object
++ additionalProperties: true
++ properties:
++ safeToShowValues:
++ type: boolean
++ description: |
++ A flag that should only be set to true temporarily when experiencing a
++ deprecation message that contains censored content that you wish to
++ reveal.
+diff --git a/applications/jupyterhub/deploy/values.yaml b/applications/jupyterhub/deploy/values.yaml
+index 2f5cbca..41e108d 100755
+--- a/applications/jupyterhub/deploy/values.yaml
++++ b/applications/jupyterhub/deploy/values.yaml
+@@ -1,4 +1,4 @@
+-harness:
++harness: # EDIT: CLOUDHARNESS
+ subdomain: hub
+ service:
+ auto: false
+@@ -31,6 +31,11 @@ harness:
+ fullnameOverride: ""
+ nameOverride:
+
++# enabled is ignored by the jupyterhub chart itself, but a chart depending on
+# the jupyterhub chart conditionally can make use of this config option as the
++# condition.
++enabled:
++
+ # custom can contain anything you want to pass to the hub pod, as all passed
+ # Helm template values will be made available there.
+ custom: {}
+@@ -54,10 +59,11 @@ imagePullSecrets: []
+ # ConfigurableHTTPProxy speaks with the actual ConfigurableHTTPProxy server in
+ # the proxy pod.
+ hub:
++ revisionHistoryLimit:
+ config:
+ JupyterHub:
+ admin_access: true
+- authenticator_class: keycloak
++ authenticator_class: keycloak # EDIT: CLOUDHARNESS
+ service:
+ type: ClusterIP
+ annotations: {}
+@@ -68,7 +74,6 @@ hub:
+ baseUrl: /
+ cookieSecret:
+ initContainers: []
+- fsGid: 1000
+ nodeSelector: {}
+ tolerations: []
+ concurrentSpawnLimit: 64
+@@ -106,37 +111,38 @@ hub:
+ extraVolumes: []
+ extraVolumeMounts: []
+ image:
+- name: jupyterhub/k8s-hub
+- tag: "1.1.3"
++ name: quay.io/jupyterhub/k8s-hub
++ tag: "3.2.1"
+ pullPolicy:
+ pullSecrets: []
+ resources: {}
++ podSecurityContext:
++ fsGroup: 1000
+ containerSecurityContext:
+ runAsUser: 1000
+ runAsGroup: 1000
+ allowPrivilegeEscalation: false
+ lifecycle: {}
++ loadRoles: {}
+ services: {}
+ pdb:
+ enabled: false
+ maxUnavailable:
+ minAvailable: 1
+ networkPolicy:
+- enabled: false
++ enabled: true
+ ingress: []
+- ## egress for JupyterHub already includes Kubernetes internal DNS and
+- ## access to the proxy, but can be restricted further, but ensure to allow
+- ## access to the Kubernetes API server that couldn't be pinned ahead of
+- ## time.
+- ##
+- ## ref: https://stackoverflow.com/a/59016417/2220152
+- egress:
+- - to:
+- - ipBlock:
+- cidr: 0.0.0.0/0
++ egress: []
++ egressAllowRules:
++ cloudMetadataServer: true
++ dnsPortsCloudMetadataServer: true
++ dnsPortsKubeSystemNamespace: true
++ dnsPortsPrivateIPs: true
++ nonPrivateIPs: true
++ privateIPs: true
+ interNamespaceAccessLabels: ignore
+ allowedIngressPorts: []
+- allowNamedServers: true
++ allowNamedServers: true # EDIT: CLOUDHARNESS
+ namedServerLimitPerUser:
+ authenticatePrometheus:
+ redirectToServer:
+@@ -163,11 +169,13 @@ hub:
+ timeoutSeconds: 1
+ existingSecret:
+ serviceAccount:
++ create: true
++ name:
+ annotations: {}
+ extraPodSpec: {}
+
+ rbac:
+- enabled: true
++ create: true
+
+ # proxy relates to the proxy pod, the proxy-public service, and the autohttps
+ # pod and proxy-http service.
+@@ -202,7 +210,7 @@ proxy:
+ rollingUpdate:
+ # service relates to the proxy-public service
+ service:
+- type: NodePort
++ type: NodePort # EDIT: CLOUDHARNESS
+ labels: {}
+ annotations: {}
+ nodePorts:
+@@ -215,13 +223,17 @@ proxy:
+ # chp relates to the proxy pod, which is responsible for routing traffic based
+ # on dynamic configuration sent from JupyterHub to CHP's REST API.
+ chp:
++ revisionHistoryLimit:
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ image:
+- name: jupyterhub/configurable-http-proxy
+- tag: 4.5.0 # https://github.com/jupyterhub/configurable-http-proxy/releases
++ name: quay.io/jupyterhub/configurable-http-proxy
++ # tag is automatically bumped to new patch versions by the
++ # watch-dependencies.yaml workflow.
++ #
++ tag: "4.6.1" # https://github.com/jupyterhub/configurable-http-proxy/tags
+ pullPolicy:
+ pullSecrets: []
+ extraCommandLineFlags: []
+@@ -229,11 +241,14 @@ proxy:
+ enabled: true
+ initialDelaySeconds: 60
+ periodSeconds: 10
++ failureThreshold: 30
++ timeoutSeconds: 3
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 0
+ periodSeconds: 2
+ failureThreshold: 1000
++ timeoutSeconds: 1
+ resources: {}
+ defaultTarget:
+ errorTarget:
+@@ -241,12 +256,16 @@ proxy:
+ nodeSelector: {}
+ tolerations: []
+ networkPolicy:
+- enabled: false
++ enabled: true
+ ingress: []
+- egress:
+- - to:
+- - ipBlock:
+- cidr: 0.0.0.0/0
++ egress: []
++ egressAllowRules:
++ cloudMetadataServer: true
++ dnsPortsCloudMetadataServer: true
++ dnsPortsKubeSystemNamespace: true
++ dnsPortsPrivateIPs: true
++ nonPrivateIPs: true
++ privateIPs: true
+ interNamespaceAccessLabels: ignore
+ allowedIngressPorts: [http, https]
+ pdb:
+@@ -257,13 +276,17 @@ proxy:
+ # traefik relates to the autohttps pod, which is responsible for TLS
+ # termination when proxy.https.type=letsencrypt.
+ traefik:
++ revisionHistoryLimit:
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ image:
+ name: traefik
+- tag: v2.4.11 # ref: https://hub.docker.com/_/traefik?tab=tags
++ # tag is automatically bumped to new patch versions by the
++ # watch-dependencies.yaml workflow.
++ #
++ tag: "v2.10.7" # ref: https://hub.docker.com/_/traefik?tab=tags
+ pullPolicy:
+ pullSecrets: []
+ hsts:
+@@ -272,6 +295,7 @@ proxy:
+ maxAge: 15724800 # About 6 months
+ resources: {}
+ labels: {}
++ extraInitContainers: []
+ extraEnv: {}
+ extraVolumes: []
+ extraVolumeMounts: []
+@@ -283,10 +307,14 @@ proxy:
+ networkPolicy:
+ enabled: true
+ ingress: []
+- egress:
+- - to:
+- - ipBlock:
+- cidr: 0.0.0.0/0
++ egress: []
++ egressAllowRules:
++ cloudMetadataServer: true
++ dnsPortsCloudMetadataServer: true
++ dnsPortsKubeSystemNamespace: true
++ dnsPortsPrivateIPs: true
++ nonPrivateIPs: true
++ privateIPs: true
+ interNamespaceAccessLabels: ignore
+ allowedIngressPorts: [http, https]
+ pdb:
+@@ -294,6 +322,8 @@ proxy:
+ maxUnavailable:
+ minAvailable: 1
+ serviceAccount:
++ create: true
++ name:
+ annotations: {}
+ extraPodSpec: {}
+ secretSync:
+@@ -302,8 +332,8 @@ proxy:
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ image:
+- name: jupyterhub/k8s-secret-sync
+- tag: "1.1.3"
++ name: quay.io/jupyterhub/k8s-secret-sync
++ tag: "3.2.1"
+ pullPolicy:
+ pullSecrets: []
+ resources: {}
+@@ -342,29 +372,27 @@ singleuser:
+ preferred: []
+ networkTools:
+ image:
+- name: jupyterhub/k8s-network-tools
+- tag: "1.1.3"
++ name: quay.io/jupyterhub/k8s-network-tools
++ tag: "3.2.1"
+ pullPolicy:
+ pullSecrets: []
++ resources: {}
+ cloudMetadata:
+ # block set to true will append a privileged initContainer using the
+ # iptables to block the sensitive metadata server at the provided ip.
+- blockWithIptables: false
++ blockWithIptables: true
++ ip: 169.254.169.254
+ networkPolicy:
+- enabled: false
++ enabled: true
+ ingress: []
+- egress:
+- # Required egress to communicate with the hub and DNS servers will be
+- # augmented to these egress rules.
+- #
+- # This default rule explicitly allows all outbound traffic from singleuser
+- # pods, except to a typical IP used to return metadata that can be used by
+- # someone with malicious intent.
+- - to:
+- - ipBlock:
+- cidr: 0.0.0.0/0
+- except:
+- - 169.254.169.254/32
++ egress: []
++ egressAllowRules:
++ cloudMetadataServer: false
++ dnsPortsCloudMetadataServer: true
++ dnsPortsKubeSystemNamespace: true
++ dnsPortsPrivateIPs: true
++ nonPrivateIPs: true
++ privateIPs: false
+ interNamespaceAccessLabels: ignore
+ allowedIngressPorts: []
+ events: true
+@@ -376,6 +404,7 @@ singleuser:
+ lifecycleHooks: {}
+ initContainers: []
+ extraContainers: []
++ allowPrivilegeEscalation: false
+ uid: 1000
+ fsGid: 100
+ serviceAccountName:
+@@ -387,29 +416,29 @@ singleuser:
+ static:
+ pvcName:
+ subPath: "{username}"
+- capacity: 10Mi
+- homeMountPath: /home/workspace
++ capacity: 10Mi # EDIT: CLOUDHARNESS
++ homeMountPath: /home/workspace # EDIT: CLOUDHARNESS
+ dynamic:
+ storageClass:
+- pvcNameTemplate: jupyter-{username}
+- volumeNameTemplate: jupyter-{username}
++ pvcNameTemplate: jupyter-{username} # EDIT: CLOUDHARNESS
++ volumeNameTemplate: jupyter-{username} # EDIT: CLOUDHARNESS
+ storageAccessModes: [ReadWriteOnce]
+ image:
+- name: jupyter/base-notebook
+- tag: "hub-1.4.2"
++ name: quay.io/jupyterhub/k8s-singleuser-sample
++ tag: "3.2.1"
+ pullPolicy:
+ pullSecrets: []
+ startTimeout: 300
+ cpu:
+- limit: 0.4
+- guarantee: 0.05
++ limit: 0.4 # EDIT: CLOUDHARNESS
++ guarantee: 0.05 # EDIT: CLOUDHARNESS
+ memory:
+- limit: 0.5G
+- guarantee: 0.1G
++ limit: 0.5G # EDIT: CLOUDHARNESS
++ guarantee: 0.1G # EDIT: CLOUDHARNESS
+ extraResource:
+ limits: {}
+ guarantees: {}
+- cmd: /usr/local/bin/start-singleuser.sh
++ cmd: jupyterhub-singleuser
+ defaultUrl:
+ extraPodConfig: {}
+ profileList: []
+@@ -417,74 +446,146 @@ singleuser:
+ # scheduling relates to the user-scheduler pods and user-placeholder pods.
+ scheduling:
+ userScheduler:
+- enabled: false
++ enabled: false # EDIT: CLOUDHARNESS
++ revisionHistoryLimit:
+ replicas: 2
+ logLevel: 4
++ # plugins are configured on the user-scheduler to make us score how we
++ # schedule user pods in a way to help us schedule on the most busy node. By
++ # doing this, we help scale down more effectively. It isn't obvious how to
++ # enable/disable scoring plugins, and configure them, to accomplish this.
++ #
+ # plugins ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins-1
++ # migration ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduler-configuration-migrations
++ #
+ plugins:
+ score:
++ # These scoring plugins are enabled by default according to
++ # https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins
++ # 2022-02-22.
++ #
++ # Enabled with high priority:
++ # - NodeAffinity
++ # - InterPodAffinity
++ # - NodeResourcesFit
++ # - ImageLocality
++ # Remains enabled with low default priority:
++ # - TaintToleration
++ # - PodTopologySpread
++ # - VolumeBinding
++ # Disabled for scoring:
++ # - NodeResourcesBalancedAllocation
++ #
+ disabled:
+- - name: SelectorSpread
+- - name: TaintToleration
+- - name: PodTopologySpread
++ # We disable these plugins (with regards to scoring) to not interfere
++ # or complicate our use of NodeResourcesFit.
+ - name: NodeResourcesBalancedAllocation
+- - name: NodeResourcesLeastAllocated
+ # Disable plugins to be allowed to enable them again with a different
+ # weight and avoid an error.
+- - name: NodePreferAvoidPods
+ - name: NodeAffinity
+ - name: InterPodAffinity
++ - name: NodeResourcesFit
+ - name: ImageLocality
+ enabled:
+- - name: NodePreferAvoidPods
+- weight: 161051
+ - name: NodeAffinity
+ weight: 14631
+ - name: InterPodAffinity
+ weight: 1331
+- - name: NodeResourcesMostAllocated
++ - name: NodeResourcesFit
+ weight: 121
+ - name: ImageLocality
+ weight: 11
++ pluginConfig:
++ # Here we declare that we should optimize pods to fit based on a
++ # MostAllocated strategy instead of the default LeastAllocated.
++ - name: NodeResourcesFit
++ args:
++ scoringStrategy:
++ resources:
++ - name: cpu
++ weight: 1
++ - name: memory
++ weight: 1
++ type: MostAllocated
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ image:
+ # IMPORTANT: Bumping the minor version of this binary should go hand in
+- # hand with an inspection of the user-scheduelrs RBAC resources
+- # that we have forked.
+- name: k8s.gcr.io/kube-scheduler
+- tag: v1.19.13 # ref: https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md
++ # hand with an inspection of the user-scheduler's RBAC
++ # resources that we have forked in
++ # templates/scheduling/user-scheduler/rbac.yaml.
++ #
++ # Debugging advice:
++ #
++ # - Is configuration of kube-scheduler broken in
++ # templates/scheduling/user-scheduler/configmap.yaml?
++ #
++ # - Is the kube-scheduler binary's compatibility to work
++ # against a k8s api-server that is too new or too old?
++ #
++ # - You can update the GitHub workflow that runs tests to
++ # include "deploy/user-scheduler" in the k8s namespace report
++ # and reduce the user-scheduler deployments replicas to 1 in
++ # dev-config.yaml to get relevant logs from the user-scheduler
++ # pods. Inspect the "Kubernetes namespace report" action!
++ #
++ # - Typical failures are that kube-scheduler fails to search for
++ # resources via its "informers", and won't start trying to
++ # schedule pods before they succeed which may require
++ # additional RBAC permissions or that the k8s api-server is
++ # aware of the resources.
++ #
++ # - If "successfully acquired lease" can be seen in the logs, it
++ # is a good sign kube-scheduler is ready to schedule pods.
++ #
++ name: registry.k8s.io/kube-scheduler
++ # tag is automatically bumped to new patch versions by the
++ # watch-dependencies.yaml workflow. The minor version is pinned in the
++ # workflow, and should be updated there if a minor version bump is done
++ # here. We aim to stay around 1 minor version behind the latest k8s
++ # version.
++ #
++ tag: "v1.28.6" # ref: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
+ pullPolicy:
+ pullSecrets: []
+ nodeSelector: {}
+ tolerations: []
++ labels: {}
++ annotations: {}
+ pdb:
+ enabled: true
+ maxUnavailable: 1
+ minAvailable:
+ resources: {}
+ serviceAccount:
++ create: true
++ name:
+ annotations: {}
+ extraPodSpec: {}
+ podPriority:
+ enabled: false
+ globalDefault: false
+ defaultPriority: 0
++ imagePullerPriority: -5
+ userPlaceholderPriority: -10
+ userPlaceholder:
+ enabled: true
+ image:
+- name: k8s.gcr.io/pause
+- # tag's can be updated by inspecting the output of the command:
+- # gcloud container images list-tags k8s.gcr.io/pause --sort-by=~tags
++ name: registry.k8s.io/pause
++ # tag is automatically bumped to new patch versions by the
++ # watch-dependencies.yaml workflow.
+ #
+ # If you update this, also update prePuller.pause.image.tag
+- tag: "3.5"
++ #
++ tag: "3.9"
+ pullPolicy:
+ pullSecrets: []
++ revisionHistoryLimit:
+ replicas: 0
++ labels: {}
++ annotations: {}
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+@@ -517,6 +618,8 @@ scheduling:
+
+ # prePuller relates to the hook|continuous-image-puller DaemonsSets
+ prePuller:
++ revisionHistoryLimit:
++ labels: {}
+ annotations: {}
+ resources: {}
+ containerSecurityContext:
+@@ -530,8 +633,8 @@ prePuller:
+ pullOnlyOnChanges: true
+ # image and the configuration below relates to the hook-image-awaiter Job
+ image:
+- name: jupyterhub/k8s-image-awaiter
+- tag: "1.1.3"
++ name: quay.io/jupyterhub/k8s-image-awaiter
++ tag: "3.2.1"
+ pullPolicy:
+ pullSecrets: []
+ containerSecurityContext:
+@@ -543,6 +646,8 @@ prePuller:
+ tolerations: []
+ resources: {}
+ serviceAccount:
++ create: true
++ name:
+ annotations: {}
+ continuous:
+ enabled: true
+@@ -554,18 +659,20 @@ prePuller:
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ image:
+- name: k8s.gcr.io/pause
+- # tag's can be updated by inspecting the output of the command:
+- # gcloud container images list-tags k8s.gcr.io/pause --sort-by=~tags
++ name: registry.k8s.io/pause
++ # tag is automatically bumped to new patch versions by the
++ # watch-dependencies.yaml workflow.
+ #
+ # If you update this, also update scheduling.userPlaceholder.image.tag
+- tag: "3.5"
++ #
++ tag: "3.9"
+ pullPolicy:
+ pullSecrets: []
+
+ ingress:
+ enabled: false
+ annotations: {}
++ ingressClassName:
+ hosts: []
+ pathSuffix:
+ pathType: Prefix
+@@ -581,7 +688,8 @@ ingress:
+ cull:
+ enabled: true
+ users: false # --cull-users
+- removeNamedServers: true # --remove-named-servers
++ adminUsers: true # --cull-admin-users
++ removeNamedServers: true # EDIT: CLOUDHARNESS
+ timeout: 3600 # --timeout
+ every: 600 # --cull-every
+ concurrency: 10 # --concurrency
+diff --git a/applications/jupyterhub/zero-to-jupyterhub-k8s b/applications/jupyterhub/zero-to-jupyterhub-k8s
+new file mode 160000
+index 0000000..c92c123
+--- /dev/null
++++ b/applications/jupyterhub/zero-to-jupyterhub-k8s
+@@ -0,0 +1 @@
++Subproject commit c92c12374795e84f36f5f16c4e8b8a448ad2f230-dirty
diff --git a/applications/jupyterhub/update.sh b/applications/jupyterhub/update.sh
new file mode 100644
index 000000000..cddf68996
--- /dev/null
+++ b/applications/jupyterhub/update.sh
@@ -0,0 +1,28 @@
+git clone -n git@github.com:jupyterhub/zero-to-jupyterhub-k8s.git
+git checkout jupyterhub
+git checkout chartpress.yaml
+pip install chartpress
+cd zero-to-jupyterhub-k8s
+chartpress -t $1
+cd ..
+cp -R zero-to-jupyterhub-k8s/jupyterhub/templates/* deploy/templates
+cp zero-to-jupyterhub-k8s/jupyterhub/files/hub/* deploy/resources/hub
+cp zero-to-jupyterhub-k8s/jupyterhub/values* deploy
+cd deploy
+
+rm -Rf templates/proxy/autohttps # Proxy is not used as node balancer
+rm templates/ingress.yaml # Default cloudharness ingress is used
+# Command to replace everything like files/hub/ inside deploy/templates with resources/jupyterhub/hub/
+find templates -type f -exec sed -i 's/files\/hub/resources\/jupyterhub\/hub/g' {} \;
+
+# replace .Values. with .Values.apps.jupyterhub. so templates read from this chart's values subtree
+find templates -type f -exec sed -i 's/.Values./.Values.apps.jupyterhub./g' {} \;
+
+# replace .Values.apps.jupyterhub.hub.image with .Values.apps.jupyterhub.harness.deployment.image
+find templates -type f -exec sed -i 's/{{ .Values.apps.jupyterhub.hub.image.name }}:{{ .Values.apps.jupyterhub.hub.image.tag }}/{{ .Values.apps.jupyterhub.harness.deployment.image }}/g' {} \;
+
+
+
+find templates -type f -exec sed -i 's$.Template.BasePath "/hub$.Template.BasePath "/jupyterhub/hub$g' {} \;
+find templates -type f -exec sed -i 's$.Template.BasePath "/proxy$.Template.BasePath "/jupyterhub/proxy$g' {} \;
+find templates -type f -exec sed -i 's$.Template.BasePath "/scheduling$.Template.BasePath "/jupyterhub/scheduling$g' {} \;
diff --git a/applications/jupyterhub/zero-to-jupyterhub-k8s b/applications/jupyterhub/zero-to-jupyterhub-k8s
new file mode 160000
index 000000000..c92c12374
--- /dev/null
+++ b/applications/jupyterhub/zero-to-jupyterhub-k8s
@@ -0,0 +1 @@
+Subproject commit c92c12374795e84f36f5f16c4e8b8a448ad2f230
diff --git a/applications/neo4j/.gitignore b/applications/neo4j/.gitignore
new file mode 100644
index 000000000..7c5616916
--- /dev/null
+++ b/applications/neo4j/.gitignore
@@ -0,0 +1 @@
+helm-charts
\ No newline at end of file
diff --git a/applications/neo4j/README.md b/applications/neo4j/README.md
new file mode 100644
index 000000000..cd46617d4
--- /dev/null
+++ b/applications/neo4j/README.md
@@ -0,0 +1,22 @@
+# Neo4j browser helm chart
+
+Enable this application to deploy a Neo4j server with the neo4j browser enabled.
+
+## How to use
+The neo4j browser will be enabled at neo4j.[DOMAIN].
+
+![Neo4j browser login](docs/browser-login.png)
+
+The default credentials are set in the [application configuration file](deploy/values.yaml).
+
+It is recommended to change the password during the first login, such as:
+
+```
+ALTER USER default SET PASSWORD ''
+```
+
+## Implementation
+This implementation uses the Neo4j reverse proxy server to enable usage via Ingress and http(s).
+
+For more information, see https://neo4j.com/docs/operations-manual/current/kubernetes/accessing-neo4j-ingress/
+
diff --git a/applications/neo4j/deploy/templates/reverseProxyServer.yaml b/applications/neo4j/deploy/templates/reverseProxyServer.yaml
new file mode 100644
index 000000000..f273acb96
--- /dev/null
+++ b/applications/neo4j/deploy/templates/reverseProxyServer.yaml
@@ -0,0 +1,37 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ .Values.apps.neo4j.harness.deployment.name }}
+ labels:
+ app: {{ .Values.apps.neo4j.harness.deployment.name }}
+ namespace: "{{ .Release.Namespace }}"
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {{ .Values.apps.neo4j.harness.deployment.name }}
+ template:
+ metadata:
+ name: {{ .Values.apps.neo4j.harness.deployment.name }}
+ labels:
+ name: {{ .Values.apps.neo4j.harness.deployment.name }}
+ app: {{ .Values.apps.neo4j.harness.deployment.name }}
+ spec:
+ securityContext: {{ toYaml .Values.apps.neo4j.reverseProxy.podSecurityContext | nindent 8 }}
+ containers:
+ - name: {{ .Values.apps.neo4j.harness.deployment.name }}
+ image: {{ .Values.apps.neo4j.reverseProxy.image }}
+ imagePullPolicy: Always
+ securityContext: {{ toYaml .Values.apps.neo4j.reverseProxy.containerSecurityContext | nindent 12 }}
+ ports:
+ - containerPort: {{ .Values.apps.neo4j.harness.deployment.port }}
+ env:
+ - name: SERVICE_NAME
+ value: {{ .Values.apps.neo4j.harness.database.name }}
+ - name: PORT
+ value: {{ .Values.apps.neo4j.harness.deployment.port | quote }}
+ - name: DOMAIN
+ value: {{ .Values.apps.neo4j.reverseProxy.domain | default "cluster.local" }}
+ - name: NAMESPACE
+ value: {{ .Release.Namespace }}
+---
\ No newline at end of file
diff --git a/applications/neo4j/deploy/values.yaml b/applications/neo4j/deploy/values.yaml
new file mode 100644
index 000000000..4a9a45a4d
--- /dev/null
+++ b/applications/neo4j/deploy/values.yaml
@@ -0,0 +1,41 @@
+harness:
+ subdomain: neo4j
+ database:
+ auto: true
+ name: neo4j-db
+ type: neo4j
+ user: default
+ pass: default
+ deployment:
+ auto: false
+ service:
+ auto: true
+# Parameters for reverse proxy
+reverseProxy:
+ image: "neo4j/helm-charts-reverse-proxy:5.23"
+
+ # Name of the kubernetes service. This service should have the ports 7474 and 7687 open.
+ # This could be the admin service ex: "standalone-admin" or the loadbalancer service ex: "standalone" created via the neo4j helm chart
+ # serviceName , namespace , domain together will form the complete k8s service url. Ex: standalone-admin.default.svc.cluster.local
+ # When used against a cluster ensure the service being used is pointing to all the cluster instances.
+ # This could be the loadbalancer from neo4j helm chart or the headless service installed via neo4j-headless-service helm chart
+ serviceName: ""
+ # default is set to cluster.local
+ domain: "cluster.local"
+
+ # securityContext defines privilege and access control settings for a Container. Making sure that we dont run Neo4j as root user.
+ containerSecurityContext:
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ runAsUser: 7474
+ runAsGroup: 7474
+ capabilities:
+ drop:
+ - all
+
+ podSecurityContext:
+ runAsNonRoot: true
+ runAsUser: 7474
+ runAsGroup: 7474
+ fsGroup: 7474
+ fsGroupChangePolicy: "Always"
diff --git a/applications/neo4j/docs/browser-login.png b/applications/neo4j/docs/browser-login.png
new file mode 100644
index 000000000..86e1db1ee
Binary files /dev/null and b/applications/neo4j/docs/browser-login.png differ
diff --git a/applications/nfsserver/deploy/templates-compose/nfsserver-deployment.yaml b/applications/nfsserver/deploy/templates-compose/nfsserver-deployment.yaml
new file mode 100644
index 000000000..50dc08ed9
--- /dev/null
+++ b/applications/nfsserver/deploy/templates-compose/nfsserver-deployment.yaml
@@ -0,0 +1,19 @@
+{{- define "nfsserver.deployment" }}
+{{- with .apps.nfsserver}}
+
+{{ .name }}:
+ image: {{ .harness.deployment.image }}
+ environment:
+ # NFS useDNS? {{ .nfs.useDNS }}
+ {{- if .nfs.useDNS }}
+ - NFS_SERVER={{ printf "nfs-server.%s.svc.cluster.local" .namespace }}
+ {{- end }}
+ - NFS_PATH={{ .nfs.path }}
+ - PROVISIONER_NAME={{ printf "%s-nfs-provisioner" .namespace }}
+
+ volumes:
+ - type: volume
+ source: {{ .nfs.volumeName }}
+ target: {{ .nfs.path }}
+{{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/applications/nfsserver/nfs-subdir-external-provisioner/release-tools/boilerplate/boilerplate.py b/applications/nfsserver/nfs-subdir-external-provisioner/release-tools/boilerplate/boilerplate.py
index 5618b9ab8..26a2412ac 100755
--- a/applications/nfsserver/nfs-subdir-external-provisioner/release-tools/boilerplate/boilerplate.py
+++ b/applications/nfsserver/nfs-subdir-external-provisioner/release-tools/boilerplate/boilerplate.py
@@ -50,6 +50,7 @@
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
+
def get_refs():
refs = {}
@@ -63,6 +64,7 @@ def get_refs():
return refs
+
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
@@ -127,13 +129,16 @@ def file_passes(filename, refs, regexs):
return True
+
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
+
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git',
'cluster/env.sh', 'vendor', 'test/e2e/generated/bindata.go',
'repo-infra/verify/boilerplate/test', '.glide']
+
def normalize_files(files):
newfiles = []
for pathname in files:
@@ -142,6 +147,7 @@ def normalize_files(files):
newfiles.append(pathname)
return newfiles
+
def get_files(extensions):
files = []
if len(args.filenames) > 0:
@@ -170,13 +176,14 @@ def get_files(extensions):
outfiles.append(pathname)
return outfiles
+
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
- regexs["year"] = re.compile( 'YEAR' )
+ regexs["year"] = re.compile('YEAR')
# dates can be 2014, 2015, 2016, ..., CURRENT_YEAR, company holder names can be anything
years = range(2014, date.today().year + 1)
- regexs["date"] = re.compile( '(%s)' % "|".join(map(lambda l: str(l), years)) )
+ regexs["date"] = re.compile('(%s)' % "|".join(map(lambda l: str(l), years)))
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
@@ -184,7 +191,6 @@ def get_regexs():
return regexs
-
def main():
regexs = get_regexs()
refs = get_refs()
@@ -196,5 +202,6 @@ def main():
return 0
+
if __name__ == "__main__":
sys.exit(main())
diff --git a/applications/notifications/server/notifications/__main__.py b/applications/notifications/server/notifications/__main__.py
index 38389bde9..03c83f863 100644
--- a/applications/notifications/server/notifications/__main__.py
+++ b/applications/notifications/server/notifications/__main__.py
@@ -8,4 +8,3 @@ def main():
if __name__ == '__main__':
main()
-
diff --git a/applications/notifications/server/notifications/adapters/base_adapter.py b/applications/notifications/server/notifications/adapters/base_adapter.py
index 4ea030fe7..5ecf70f9e 100644
--- a/applications/notifications/server/notifications/adapters/base_adapter.py
+++ b/applications/notifications/server/notifications/adapters/base_adapter.py
@@ -65,7 +65,7 @@ def __init__(self, notification, channel, backend):
os.path.join(
self.channel["templateFolder"],
self.notification["template"]
- ))
+ ))
@abc.abstractmethod
def send(self, context):
diff --git a/applications/notifications/server/notifications/backends/console_backend.py b/applications/notifications/server/notifications/backends/console_backend.py
index 0a1fd005d..a50bc8030 100644
--- a/applications/notifications/server/notifications/backends/console_backend.py
+++ b/applications/notifications/server/notifications/backends/console_backend.py
@@ -18,4 +18,4 @@ def __init__(self, *args, **kwargs):
def send(self):
log.info("Send notification")
log.info(f"args:{self.args}")
- log.info("kwargs:\n"+"\n".join("{0}: {1!r}".format(k,v) for k,v in self.kwargs.items()))
+ log.info("kwargs:\n" + "\n".join("{0}: {1!r}".format(k, v) for k, v in self.kwargs.items()))
diff --git a/applications/notifications/server/notifications/backends/email_backend.py b/applications/notifications/server/notifications/backends/email_backend.py
index 53f71ec82..8a3ae77df 100644
--- a/applications/notifications/server/notifications/backends/email_backend.py
+++ b/applications/notifications/server/notifications/backends/email_backend.py
@@ -22,7 +22,6 @@ def __init__(self, email_from=None, email_to=None, subject=None, message=None, *
self.subject = subject
self.message = message
-
def send(self):
logger.info(f"Sending notification email to {self.email_to}")
msg = EmailMessage()
@@ -35,7 +34,7 @@ def send(self):
email_pass = get_secret_or_empty('email-password')
email_host = conf.get_configuration()["smtp"]["host"]
email_port = conf.get_configuration()["smtp"]["port"]
- email_tls = conf.get_configuration()["smtp"].get("use_tls")
+ email_tls = conf.get_configuration()["smtp"].get("use_tls")
smtp = smtplib.SMTP(email_host, email_port)
if email_user or email_pass:
diff --git a/applications/notifications/server/notifications/controllers/helpers.py b/applications/notifications/server/notifications/controllers/helpers.py
index 090abd485..355e0894e 100644
--- a/applications/notifications/server/notifications/controllers/helpers.py
+++ b/applications/notifications/server/notifications/controllers/helpers.py
@@ -14,9 +14,9 @@ def send(operation, context):
if not channel:
continue
-
+
for b in channel["backends"]:
- if b == "email":
+ if b == "email":
channel_backend = NotificationEmailBackend
elif b == "console":
channel_backend = NotificationConsoleBackend
diff --git a/applications/notifications/server/notifications/controllers/notifications_controller.py b/applications/notifications/server/notifications/controllers/notifications_controller.py
index a0afde4ae..cd4634c52 100644
--- a/applications/notifications/server/notifications/controllers/notifications_controller.py
+++ b/applications/notifications/server/notifications/controllers/notifications_controller.py
@@ -43,6 +43,7 @@ def handle_event(self, message: CDCEvent):
}
)
+
class NotificationsController:
_notification_handlers = []
@@ -52,7 +53,7 @@ def __init__(self):
@staticmethod
def handler(app, event_client, message):
- log.debug("Handler received message: %s",message)
+ log.debug("Handler received message: %s", message)
for nh in [nh for nh in NotificationsController._notification_handlers if nh.message_type == message.get("message_type")]:
nh.handle_event(CDCEvent.from_dict(message))
@@ -64,8 +65,8 @@ def _init_handlers(self):
log.info(f"Init handler for event {notification_app['app']}.{notification_type['name']} type {event_type}")
nss = NotificationHandler(
event_type,
- notification_app["app"],
- notification_type["name"],
+ notification_app["app"],
+ notification_type["name"],
notification_type["events"])
if not nss.topic_id in (handler.topic_id for handler in NotificationsController._notification_handlers):
self._consume_topic(nss.topic_id)
diff --git a/applications/notifications/server/setup.py b/applications/notifications/server/setup.py
index cccc107f7..f625329c1 100644
--- a/applications/notifications/server/setup.py
+++ b/applications/notifications/server/setup.py
@@ -4,7 +4,7 @@
from setuptools import setup, find_packages
NAME = "notifications"
-VERSION = "2.3.0"
+VERSION = "2.4.0"
# To install the library, run the following
#
@@ -14,7 +14,7 @@
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
- "jinja2>=3"
+ "jinja2>=3",
"python_dateutil>=2.6.0"
]
@@ -34,4 +34,3 @@
notifications
"""
)
-
diff --git a/applications/samples/Dockerfile b/applications/samples/Dockerfile
index 2925caf5a..522de3c9e 100644
--- a/applications/samples/Dockerfile
+++ b/applications/samples/Dockerfile
@@ -3,6 +3,9 @@ ARG CLOUDHARNESS_FLASK
FROM $CLOUDHARNESS_FRONTEND_BUILD as frontend
+ARG TEST_ARGUMENT=default
+RUN echo $TEST_ARGUMENT
+
ENV APP_DIR=/app
WORKDIR ${APP_DIR}
diff --git a/applications/samples/api/openapi.yaml b/applications/samples/api/openapi.yaml
index ac78581f8..f51b328df 100644
--- a/applications/samples/api/openapi.yaml
+++ b/applications/samples/api/openapi.yaml
@@ -1,4 +1,4 @@
-openapi: 3.0.0
+openapi: "3.0.3"
info:
title: CloudHarness Sample API
version: 0.1.0
@@ -23,11 +23,10 @@ paths:
description: This won't happen
"500":
description: Sentry entry should come!
- deprecated: true
operationId: error
summary: test sentry is working
x-openapi-router-controller: samples.controllers.test_controller
-
+
/ping:
get:
tags:
@@ -231,13 +230,11 @@ paths:
schema:
type: object
description: Operation result
- deprecated: true
operationId: submit_sync
summary: Send a synchronous operation
x-openapi-router-controller: samples.controllers.workflows_controller
/operation_sync_results:
get:
- deprecated: true
tags:
- workflows
parameters:
diff --git a/applications/samples/backend/requirements.txt b/applications/samples/backend/requirements.txt
index b3db72c8a..f9d4fe449 100644
--- a/applications/samples/backend/requirements.txt
+++ b/applications/samples/backend/requirements.txt
@@ -1,5 +1,3 @@
connexion[swagger-ui]==2.14.2
-swagger-ui-bundle >= 0.0.2
-python_dateutil >= 2.6.0
-setuptools >= 21.0.0
-Flask<3.0.0
+Flask == 2.2.5
+swagger-ui-bundle==0.0.9
\ No newline at end of file
diff --git a/applications/samples/backend/samples/controllers/auth_controller.py b/applications/samples/backend/samples/controllers/auth_controller.py
index 5d3ecb47b..e144a976c 100644
--- a/applications/samples/backend/samples/controllers/auth_controller.py
+++ b/applications/samples/backend/samples/controllers/auth_controller.py
@@ -13,6 +13,8 @@ def valid_token(): # noqa: E501
:rtype: List[Valid]
"""
+ from cloudharness.middleware import get_authentication_token
+ token = get_authentication_token()
return 'OK!'
@@ -24,4 +26,8 @@ def valid_cookie(): # noqa: E501
:rtype: List[Valid]
"""
- return 'OK!'
\ No newline at end of file
+ from cloudharness.middleware import get_authentication_token
+ from cloudharness.auth import decode_token
+ token = get_authentication_token()
+ assert decode_token(token)
+ return 'OK'
diff --git a/applications/samples/backend/samples/controllers/resource_controller.py b/applications/samples/backend/samples/controllers/resource_controller.py
index 3204708bf..4f17d3da5 100644
--- a/applications/samples/backend/samples/controllers/resource_controller.py
+++ b/applications/samples/backend/samples/controllers/resource_controller.py
@@ -23,9 +23,9 @@ def create_sample_resource(sample_resource=None): # noqa: E501
return "Payload is not of type SampleResource", 400
# Create a file inside the nfs
- with open("/mnt/myvolume/myfile", "w") as f:
+ with open("/tmp/myvolume/myfile", "w") as f:
print("test", file=f)
-
+
return resource_service.create_sample_resource(sample_resource), 201
diff --git a/applications/samples/backend/samples/controllers/security_controller_.py b/applications/samples/backend/samples/controllers/security_controller_.py
index 8dd254a23..c052e680f 100644
--- a/applications/samples/backend/samples/controllers/security_controller_.py
+++ b/applications/samples/backend/samples/controllers/security_controller_.py
@@ -13,5 +13,3 @@ def info_from_bearerAuth(token):
:rtype: dict | None
"""
return {'uid': 'user_id'}
-
-
diff --git a/applications/samples/backend/samples/controllers/test_controller.py b/applications/samples/backend/samples/controllers/test_controller.py
index e792c99e0..f3580dc3d 100644
--- a/applications/samples/backend/samples/controllers/test_controller.py
+++ b/applications/samples/backend/samples/controllers/test_controller.py
@@ -22,8 +22,26 @@ def ping(): # noqa: E501
:rtype: str
"""
+
+ import os
+
+ expected_environment_variables = {
+ 'WORKERS': '3',
+ 'ENVIRONMENT_TEST_A': 'value',
+ 'ENVIRONMENT_TEST_B': '123',
+ }
+
+ for key, expected_value in expected_environment_variables.items():
+ try:
+ environment_value = os.environ[key]
+ if environment_value != expected_value:
+ raise Exception(f'Expected environment variable {key} to be {expected_value}, but got {environment_value}')
+ except KeyError:
+ raise Exception(f'Expected to have an environment variable {key} defined')
+
import time
return time.time()
+
def serialization():
- return User(last_name="Last", first_name="First")
\ No newline at end of file
+ return User(last_name="Last", first_name="First")
diff --git a/applications/samples/backend/samples/encoder.py b/applications/samples/backend/samples/encoder.py
index fe2c3b183..ce94c8c87 100644
--- a/applications/samples/backend/samples/encoder.py
+++ b/applications/samples/backend/samples/encoder.py
@@ -1,8 +1,6 @@
+from connexion.apps.flask_app import FlaskJSONEncoder
-import six
-
-from samples.models.base_model_ import Model
-from flask.json import FlaskJSONEncoder
+from samples.models.base_model import Model
class JSONEncoder(FlaskJSONEncoder):
@@ -11,7 +9,7 @@ class JSONEncoder(FlaskJSONEncoder):
def default(self, o):
if isinstance(o, Model):
dikt = {}
- for attr, _ in six.iteritems(o.openapi_types):
+ for attr in o.openapi_types:
value = getattr(o, attr)
if value is None and not self.include_nulls:
continue
diff --git a/applications/samples/backend/samples/models/__init__.py b/applications/samples/backend/samples/models/__init__.py
index 4034deb6a..946173988 100644
--- a/applications/samples/backend/samples/models/__init__.py
+++ b/applications/samples/backend/samples/models/__init__.py
@@ -1,7 +1,4 @@
-# coding: utf-8
-
# flake8: noqa
-from __future__ import absolute_import
# import models into model package
from samples.models.inline_response202 import InlineResponse202
from samples.models.inline_response202_task import InlineResponse202Task
diff --git a/applications/samples/backend/samples/models/base_model.py b/applications/samples/backend/samples/models/base_model.py
new file mode 100644
index 000000000..30bbbb638
--- /dev/null
+++ b/applications/samples/backend/samples/models/base_model.py
@@ -0,0 +1,68 @@
+import pprint
+
+import typing
+
+from samples import util
+
+T = typing.TypeVar('T')
+
+
+class Model:
+ # openapiTypes: The key is attribute name and the
+ # value is attribute type.
+ openapi_types: typing.Dict[str, type] = {}
+
+ # attributeMap: The key is attribute name and the
+ # value is json key in definition.
+ attribute_map: typing.Dict[str, str] = {}
+
+ @classmethod
+ def from_dict(cls: typing.Type[T], dikt) -> T:
+ """Returns the dict as a model"""
+ return util.deserialize_model(dikt, cls)
+
+ def to_dict(self):
+ """Returns the model properties as a dict
+
+ :rtype: dict
+ """
+ result = {}
+
+ for attr in self.openapi_types:
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model
+
+ :rtype: str
+ """
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ return not self == other
diff --git a/applications/samples/backend/samples/models/inline_response202.py b/applications/samples/backend/samples/models/inline_response202.py
index b6d2a3ca9..e98cb00ee 100644
--- a/applications/samples/backend/samples/models/inline_response202.py
+++ b/applications/samples/backend/samples/models/inline_response202.py
@@ -1,11 +1,8 @@
-# coding: utf-8
-
-from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
-from samples.models.base_model_ import Model
+from samples.models.base_model import Model
from samples.models.inline_response202_task import InlineResponse202Task
from samples import util
@@ -45,7 +42,7 @@ def from_dict(cls, dikt) -> 'InlineResponse202':
return util.deserialize_model(dikt, cls)
@property
- def task(self):
+ def task(self) -> InlineResponse202Task:
"""Gets the task of this InlineResponse202.
@@ -55,7 +52,7 @@ def task(self):
return self._task
@task.setter
- def task(self, task):
+ def task(self, task: InlineResponse202Task):
"""Sets the task of this InlineResponse202.
diff --git a/applications/samples/backend/samples/models/inline_response202_task.py b/applications/samples/backend/samples/models/inline_response202_task.py
index e9a53b7f3..2c4af7a05 100644
--- a/applications/samples/backend/samples/models/inline_response202_task.py
+++ b/applications/samples/backend/samples/models/inline_response202_task.py
@@ -1,11 +1,8 @@
-# coding: utf-8
-
-from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
-from samples.models.base_model_ import Model
+from samples.models.base_model import Model
from samples import util
@@ -48,7 +45,7 @@ def from_dict(cls, dikt) -> 'InlineResponse202Task':
return util.deserialize_model(dikt, cls)
@property
- def href(self):
+ def href(self) -> str:
"""Gets the href of this InlineResponse202Task.
the url where to check the operation status # noqa: E501
@@ -59,7 +56,7 @@ def href(self):
return self._href
@href.setter
- def href(self, href):
+ def href(self, href: str):
"""Sets the href of this InlineResponse202Task.
the url where to check the operation status # noqa: E501
@@ -71,7 +68,7 @@ def href(self, href):
self._href = href
@property
- def name(self):
+ def name(self) -> str:
"""Gets the name of this InlineResponse202Task.
@@ -81,7 +78,7 @@ def name(self):
return self._name
@name.setter
- def name(self, name):
+ def name(self, name: str):
"""Sets the name of this InlineResponse202Task.
diff --git a/applications/samples/backend/samples/models/sample_resource.py b/applications/samples/backend/samples/models/sample_resource.py
index 1deca853f..d54774527 100644
--- a/applications/samples/backend/samples/models/sample_resource.py
+++ b/applications/samples/backend/samples/models/sample_resource.py
@@ -1,11 +1,8 @@
-# coding: utf-8
-
-from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
-from samples.models.base_model_ import Model
+from samples.models.base_model import Model
from samples import util
@@ -53,7 +50,7 @@ def from_dict(cls, dikt) -> 'SampleResource':
return util.deserialize_model(dikt, cls)
@property
- def a(self):
+ def a(self) -> float:
"""Gets the a of this SampleResource.
# noqa: E501
@@ -64,7 +61,7 @@ def a(self):
return self._a
@a.setter
- def a(self, a):
+ def a(self, a: float):
"""Sets the a of this SampleResource.
# noqa: E501
@@ -78,7 +75,7 @@ def a(self, a):
self._a = a
@property
- def b(self):
+ def b(self) -> float:
"""Gets the b of this SampleResource.
# noqa: E501
@@ -89,7 +86,7 @@ def b(self):
return self._b
@b.setter
- def b(self, b):
+ def b(self, b: float):
"""Sets the b of this SampleResource.
# noqa: E501
@@ -101,7 +98,7 @@ def b(self, b):
self._b = b
@property
- def id(self):
+ def id(self) -> float:
"""Gets the id of this SampleResource.
# noqa: E501
@@ -112,7 +109,7 @@ def id(self):
return self._id
@id.setter
- def id(self, id):
+ def id(self, id: float):
"""Sets the id of this SampleResource.
# noqa: E501
diff --git a/applications/samples/backend/samples/openapi/openapi.yaml b/applications/samples/backend/samples/openapi/openapi.yaml
index 4e63db551..6a8757716 100644
--- a/applications/samples/backend/samples/openapi/openapi.yaml
+++ b/applications/samples/backend/samples/openapi/openapi.yaml
@@ -1,4 +1,4 @@
-openapi: 3.0.0
+openapi: 3.0.3
info:
contact:
email: cloudharness@metacell.us
diff --git a/applications/samples/backend/samples/service/resource_service.py b/applications/samples/backend/samples/service/resource_service.py
index 64765ad24..9fbba7b45 100644
--- a/applications/samples/backend/samples/service/resource_service.py
+++ b/applications/samples/backend/samples/service/resource_service.py
@@ -8,6 +8,7 @@
counter = 0
resources = {}
+
class ResourceNotFound(Exception):
pass
@@ -34,7 +35,7 @@ def get_sample_resource(sampleresource_id: int): # noqa: E501
def get_sample_resources() -> List[SampleResource]:
- return [v for v in resources.values()]
+ return [v for v in resources.values()]
def update_sample_resource(sampleresource_id: int, sample_resource: SampleResource) -> List[SampleResource]:
diff --git a/applications/samples/backend/samples/test/test_sample.py b/applications/samples/backend/samples/test/test_sample.py
index a6f1c78f9..62acfabab 100644
--- a/applications/samples/backend/samples/test/test_sample.py
+++ b/applications/samples/backend/samples/test/test_sample.py
@@ -1,2 +1,2 @@
def test_sample():
- assert True
\ No newline at end of file
+ assert True
diff --git a/applications/samples/backend/samples/typing_utils.py b/applications/samples/backend/samples/typing_utils.py
index 0563f81fd..74e3c913a 100644
--- a/applications/samples/backend/samples/typing_utils.py
+++ b/applications/samples/backend/samples/typing_utils.py
@@ -1,5 +1,3 @@
-# coding: utf-8
-
import sys
if sys.version_info < (3, 7):
diff --git a/applications/samples/backend/samples/util.py b/applications/samples/backend/samples/util.py
index 96a83499d..b802fafda 100644
--- a/applications/samples/backend/samples/util.py
+++ b/applications/samples/backend/samples/util.py
@@ -1,6 +1,5 @@
import datetime
-import six
import typing
from samples import typing_utils
@@ -16,7 +15,7 @@ def _deserialize(data, klass):
if data is None:
return None
- if klass in six.integer_types or klass in (float, str, bool, bytearray):
+ if klass in (int, float, str, bool, bytearray):
return _deserialize_primitive(data, klass)
elif klass == object:
return _deserialize_object(data)
@@ -45,7 +44,7 @@ def _deserialize_primitive(data, klass):
try:
value = klass(data)
except UnicodeEncodeError:
- value = six.u(data)
+ value = data
except TypeError:
value = data
return value
@@ -68,8 +67,8 @@ def deserialize_date(string):
:rtype: date
"""
if string is None:
- return None
-
+ return None
+
try:
from dateutil.parser import parse
return parse(string).date()
@@ -88,8 +87,8 @@ def deserialize_datetime(string):
:rtype: datetime
"""
if string is None:
- return None
-
+ return None
+
try:
from dateutil.parser import parse
return parse(string)
@@ -110,7 +109,7 @@ def deserialize_model(data, klass):
if not instance.openapi_types:
return data
- for attr, attr_type in six.iteritems(instance.openapi_types):
+ for attr, attr_type in instance.openapi_types.items():
if data is not None \
and instance.attribute_map[attr] in data \
and isinstance(data, (list, dict)):
@@ -145,4 +144,4 @@ def _deserialize_dict(data, boxed_type):
:rtype: dict
"""
return {k: _deserialize(v, boxed_type)
- for k, v in six.iteritems(data)}
+ for k, v in data.items()}
diff --git a/applications/samples/backend/setup.py b/applications/samples/backend/setup.py
index 471cf86b3..a7a64a899 100644
--- a/applications/samples/backend/setup.py
+++ b/applications/samples/backend/setup.py
@@ -4,7 +4,7 @@
from setuptools import setup, find_packages
NAME = "samples"
-VERSION = "2.3.0"
+VERSION = "2.4.0"
# To install the library, run the following
#
@@ -14,11 +14,13 @@
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
- "connexion>=2.0.2",
- "swagger-ui-bundle>=0.0.2",
+ "connexion[swagger-ui]==2.14.2",
+ "Flask >= 2.2.5",
"python_dateutil>=2.6.0",
"pyjwt>=2.6.0",
- "cloudharness"
+ "swagger-ui-bundle>=0.0.2",
+ "cloudharness",
+
]
setup(
@@ -38,4 +40,3 @@
CloudHarness Sample api
"""
)
-
diff --git a/applications/samples/backend/tox.ini b/applications/samples/backend/tox.ini
index df3f17737..5d5ecced8 100644
--- a/applications/samples/backend/tox.ini
+++ b/applications/samples/backend/tox.ini
@@ -5,7 +5,7 @@ skipsdist=True
[testenv]
deps=-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
- {toxinidir}
+ {toxinidir}
commands=
pytest --cov=samples
diff --git a/applications/samples/backend/www/assets/index-Cyl2oP5E.css b/applications/samples/backend/www/assets/index-Cyl2oP5E.css
new file mode 100644
index 000000000..aef4555b0
--- /dev/null
+++ b/applications/samples/backend/www/assets/index-Cyl2oP5E.css
@@ -0,0 +1 @@
+body{text-align:center;background-color:"#eeeeee";font-family:Roboto,Helvetica,sans-serif}
diff --git a/applications/samples/backend/www/assets/index-DtmKuX2X.js b/applications/samples/backend/www/assets/index-DtmKuX2X.js
new file mode 100644
index 000000000..19bf27f51
--- /dev/null
+++ b/applications/samples/backend/www/assets/index-DtmKuX2X.js
@@ -0,0 +1,209 @@
+(function(){const s=document.createElement("link").relList;if(s&&s.supports&&s.supports("modulepreload"))return;for(const y of document.querySelectorAll('link[rel="modulepreload"]'))v(y);new MutationObserver(y=>{for(const S of y)if(S.type==="childList")for(const f of S.addedNodes)f.tagName==="LINK"&&f.rel==="modulepreload"&&v(f)}).observe(document,{childList:!0,subtree:!0});function p(y){const S={};return y.integrity&&(S.integrity=y.integrity),y.referrerPolicy&&(S.referrerPolicy=y.referrerPolicy),y.crossOrigin==="use-credentials"?S.credentials="include":y.crossOrigin==="anonymous"?S.credentials="omit":S.credentials="same-origin",S}function v(y){if(y.ep)return;y.ep=!0;const S=p(y);fetch(y.href,S)}})();function cA(l){return l&&l.__esModule&&Object.prototype.hasOwnProperty.call(l,"default")?l.default:l}var SE={exports:{}},Uh={},EE={exports:{}},hf={exports:{}};hf.exports;(function(l,s){/**
+ * @license React
+ * react.development.js
+ *
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */(function(){typeof __REACT_DEVTOOLS_GLOBAL_HOOK__<"u"&&typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.registerInternalModuleStart=="function"&&__REACT_DEVTOOLS_GLOBAL_HOOK__.registerInternalModuleStart(new Error);var p="18.3.1",v=Symbol.for("react.element"),y=Symbol.for("react.portal"),S=Symbol.for("react.fragment"),f=Symbol.for("react.strict_mode"),N=Symbol.for("react.profiler"),z=Symbol.for("react.provider"),U=Symbol.for("react.context"),M=Symbol.for("react.forward_ref"),F=Symbol.for("react.suspense"),ee=Symbol.for("react.suspense_list"),$=Symbol.for("react.memo"),B=Symbol.for("react.lazy"),ce=Symbol.for("react.offscreen"),Je=Symbol.iterator,Ye="@@iterator";function Ce(d){if(d===null||typeof d!="object")return null;var g=Je&&d[Je]||d[Ye];return typeof g=="function"?g:null}var ne={current:null},Ke={transition:null},de={current:null,isBatchingLegacy:!1,didScheduleLegacyUpdate:!1},Fe={current:null},ge={},Gt=null;function Cn(d){Gt=d}ge.setExtraStackFrame=function(d){Gt=d},ge.getCurrentStack=null,ge.getStackAddendum=function(){var d="";Gt&&(d+=Gt);var g=ge.getCurrentStack;return g&&(d+=g()||""),d};var Rt=!1,rt=!1,Fn=!1,xe=!1,Ve=!1,yt={ReactCurrentDispatcher:ne,ReactCurrentBatchConfig:Ke,ReactCurrentOwner:Fe};yt.ReactDebugCurrentFrame=ge,yt.ReactCurrentActQueue=de;function gt(d){{for(var g=arguments.length,D=new Array(g>1?g-1:0),O=1;O1?g-1:0),O=1;O1){for(var et=Array(Ie),tt=0;tt1){for(var ut=Array(tt),pt=0;pt is not supported and will be removed in a future major release. Did you mean to render instead?")),g.Provider},set:function(Z){g.Provider=Z}},_currentValue:{get:function(){return g._currentValue},set:function(Z){g._currentValue=Z}},_currentValue2:{get:function(){return g._currentValue2},set:function(Z){g._currentValue2=Z}},_threadCount:{get:function(){return g._threadCount},set:function(Z){g._threadCount=Z}},Consumer:{get:function(){return D||(D=!0,be("Rendering is not supported and will be removed in a future major release. 
Did you mean to render instead?")),g.Consumer}},displayName:{get:function(){return g.displayName},set:function(Z){P||(gt("Setting `displayName` on Context.Consumer has no effect. You should set it directly on the context with Context.displayName = '%s'.",Z),P=!0)}}}),g.Consumer=le}return g._currentRenderer=null,g._currentRenderer2=null,g}var ia=-1,Ma=0,oa=1,Pr=2;function Sr(d){if(d._status===ia){var g=d._result,D=g();if(D.then(function(le){if(d._status===Ma||d._status===ia){var Z=d;Z._status=oa,Z._result=le}},function(le){if(d._status===Ma||d._status===ia){var Z=d;Z._status=Pr,Z._result=le}}),d._status===ia){var O=d;O._status=Ma,O._result=D}}if(d._status===oa){var P=d._result;return P===void 0&&be(`lazy: Expected the result of a dynamic import() call. Instead received: %s
+
+Your code should look like:
+ const MyComponent = lazy(() => import('./MyComponent'))
+
+Did you accidentally put curly braces around the import?`,P),"default"in P||be(`lazy: Expected the result of a dynamic import() call. Instead received: %s
+
+Your code should look like:
+ const MyComponent = lazy(() => import('./MyComponent'))`,P),P.default}else throw d._result}function b(d){var g={_status:ia,_result:d},D={$$typeof:B,_payload:g,_init:Sr};{var O,P;Object.defineProperties(D,{defaultProps:{configurable:!0,get:function(){return O},set:function(le){be("React.lazy(...): It is not supported to assign `defaultProps` to a lazy component import. Either specify them where the component is defined, or create a wrapping component around it."),O=le,Object.defineProperty(D,"defaultProps",{enumerable:!0})}},propTypes:{configurable:!0,get:function(){return P},set:function(le){be("React.lazy(...): It is not supported to assign `propTypes` to a lazy component import. Either specify them where the component is defined, or create a wrapping component around it."),P=le,Object.defineProperty(D,"propTypes",{enumerable:!0})}}})}return D}function j(d){d!=null&&d.$$typeof===$?be("forwardRef requires a render function but received a `memo` component. Instead of forwardRef(memo(...)), use memo(forwardRef(...))."):typeof d!="function"?be("forwardRef requires a render function but was given %s.",d===null?"null":typeof d):d.length!==0&&d.length!==2&&be("forwardRef render functions accept exactly two parameters: props and ref. %s",d.length===1?"Did you forget to use the ref parameter?":"Any additional parameter will be undefined."),d!=null&&(d.defaultProps!=null||d.propTypes!=null)&&be("forwardRef render functions do not support propTypes or defaultProps. 
Did you accidentally pass a React component?");var g={$$typeof:M,render:d};{var D;Object.defineProperty(g,"displayName",{enumerable:!1,configurable:!0,get:function(){return D},set:function(O){D=O,!d.name&&!d.displayName&&(d.displayName=O)}})}return g}var G;G=Symbol.for("react.module.reference");function oe(d){return!!(typeof d=="string"||typeof d=="function"||d===S||d===N||Ve||d===f||d===F||d===ee||xe||d===ce||Rt||rt||Fn||typeof d=="object"&&d!==null&&(d.$$typeof===B||d.$$typeof===$||d.$$typeof===z||d.$$typeof===U||d.$$typeof===M||d.$$typeof===G||d.getModuleId!==void 0))}function Ae(d,g){oe(d)||be("memo: The first argument must be a component. Instead received: %s",d===null?"null":typeof d);var D={$$typeof:$,type:d,compare:g===void 0?null:g};{var O;Object.defineProperty(D,"displayName",{enumerable:!1,configurable:!0,get:function(){return O},set:function(P){O=P,!d.name&&!d.displayName&&(d.displayName=P)}})}return D}function fe(){var d=ne.current;return d===null&&be(`Invalid hook call. Hooks can only be called inside of the body of a function component. This could happen for one of the following reasons:
+1. You might have mismatching versions of React and the renderer (such as React DOM)
+2. You might be breaking the Rules of Hooks
+3. You might have more than one copy of React in the same app
+See https://reactjs.org/link/invalid-hook-call for tips about how to debug and fix this problem.`),d}function Te(d){var g=fe();if(d._context!==void 0){var D=d._context;D.Consumer===d?be("Calling useContext(Context.Consumer) is not supported, may cause bugs, and will be removed in a future major release. Did you mean to call useContext(Context) instead?"):D.Provider===d&&be("Calling useContext(Context.Provider) is not supported. Did you mean to call useContext(Context) instead?")}return g.useContext(d)}function Se(d){var g=fe();return g.useState(d)}function ct(d,g,D){var O=fe();return O.useReducer(d,g,D)}function we(d){var g=fe();return g.useRef(d)}function st(d,g){var D=fe();return D.useEffect(d,g)}function un(d,g){var D=fe();return D.useInsertionEffect(d,g)}function Er(d,g){var D=fe();return D.useLayoutEffect(d,g)}function Rr(d,g){var D=fe();return D.useCallback(d,g)}function Vt(d,g){var D=fe();return D.useMemo(d,g)}function vi(d,g,D){var O=fe();return O.useImperativeHandle(d,g,D)}function Ji(d,g){{var D=fe();return D.useDebugValue(d,g)}}function nu(){var d=fe();return d.useTransition()}function ua(d){var g=fe();return g.useDeferredValue(d)}function _e(){var d=fe();return d.useId()}function hi(d,g,D){var O=fe();return O.useSyncExternalStore(d,g,D)}var Ua=0,ru,au,iu,ou,uu,lu,su;function Jl(){}Jl.__reactDisabledLog=!0;function Cf(){{if(Ua===0){ru=console.log,au=console.info,iu=console.warn,ou=console.error,uu=console.group,lu=console.groupCollapsed,su=console.groupEnd;var d={configurable:!0,enumerable:!0,value:Jl,writable:!0};Object.defineProperties(console,{info:d,log:d,warn:d,error:d,group:d,groupCollapsed:d,groupEnd:d})}Ua++}}function cu(){{if(Ua--,Ua===0){var d={configurable:!0,enumerable:!0,writable:!0};Object.defineProperties(console,{log:Ht({},d,{value:ru}),info:Ht({},d,{value:au}),warn:Ht({},d,{value:iu}),error:Ht({},d,{value:ou}),group:Ht({},d,{value:uu}),groupCollapsed:Ht({},d,{value:lu}),groupEnd:Ht({},d,{value:su})})}Ua<0&&be("disabledDepth fell below 
zero. This is a bug in React. Please file an issue.")}}var mi=yt.ReactCurrentDispatcher,or;function Na(d,g,D){{if(or===void 0)try{throw Error()}catch(P){var O=P.stack.trim().match(/\n( *(at )?)/);or=O&&O[1]||""}return`
+`+or+d}}var ka=!1,Zi;{var fu=typeof WeakMap=="function"?WeakMap:Map;Zi=new fu}function Zl(d,g){if(!d||ka)return"";{var D=Zi.get(d);if(D!==void 0)return D}var O;ka=!0;var P=Error.prepareStackTrace;Error.prepareStackTrace=void 0;var le;le=mi.current,mi.current=null,Cf();try{if(g){var Z=function(){throw Error()};if(Object.defineProperty(Z.prototype,"props",{set:function(){throw Error()}}),typeof Reflect=="object"&&Reflect.construct){try{Reflect.construct(Z,[])}catch(Be){O=Be}Reflect.construct(d,[],Z)}else{try{Z.call()}catch(Be){O=Be}d.call(Z.prototype)}}else{try{throw Error()}catch(Be){O=Be}d()}}catch(Be){if(Be&&O&&typeof Be.stack=="string"){for(var pe=Be.stack.split(`
+`),Le=O.stack.split(`
+`),Ie=pe.length-1,et=Le.length-1;Ie>=1&&et>=0&&pe[Ie]!==Le[et];)et--;for(;Ie>=1&&et>=0;Ie--,et--)if(pe[Ie]!==Le[et]){if(Ie!==1||et!==1)do if(Ie--,et--,et<0||pe[Ie]!==Le[et]){var tt=`
+`+pe[Ie].replace(" at new "," at ");return d.displayName&&tt.includes("")&&(tt=tt.replace("",d.displayName)),typeof d=="function"&&Zi.set(d,tt),tt}while(Ie>=1&&et>=0);break}}}finally{ka=!1,mi.current=le,cu(),Error.prepareStackTrace=P}var ut=d?d.displayName||d.name:"",pt=ut?Na(ut):"";return typeof d=="function"&&Zi.set(d,pt),pt}function du(d,g,D){return Zl(d,!1)}function Tf(d){var g=d.prototype;return!!(g&&g.isReactComponent)}function za(d,g,D){if(d==null)return"";if(typeof d=="function")return Zl(d,Tf(d));if(typeof d=="string")return Na(d);switch(d){case F:return Na("Suspense");case ee:return Na("SuspenseList")}if(typeof d=="object")switch(d.$$typeof){case M:return du(d.render);case $:return za(d.type,g,D);case B:{var O=d,P=O._payload,le=O._init;try{return za(le(P),g,D)}catch{}}}return""}var es={},pu=yt.ReactDebugCurrentFrame;function eo(d){if(d){var g=d._owner,D=za(d.type,d._source,g?g.type:null);pu.setExtraStackFrame(D)}else pu.setExtraStackFrame(null)}function ts(d,g,D,O,P){{var le=Function.call.bind(gr);for(var Z in d)if(le(d,Z)){var pe=void 0;try{if(typeof d[Z]!="function"){var Le=Error((O||"React class")+": "+D+" type `"+Z+"` is invalid; it must be a function, usually from the `prop-types` package, but received `"+typeof d[Z]+"`.This often happens because of typos such as `PropTypes.function` instead of `PropTypes.func`.");throw Le.name="Invariant Violation",Le}pe=d[Z](g,Z,O,D,null,"SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED")}catch(Ie){pe=Ie}pe&&!(pe instanceof Error)&&(eo(P),be("%s: type specification of %s `%s` is invalid; the type checker function must return `null` or an `Error` but returned a %s. 
You may have forgotten to pass an argument to the type checker creator (arrayOf, instanceOf, objectOf, oneOf, oneOfType, and shape all require an argument).",O||"React class",D,Z,typeof pe),eo(null)),pe instanceof Error&&!(pe.message in es)&&(es[pe.message]=!0,eo(P),be("Failed %s type: %s",D,pe.message),eo(null))}}}function ze(d){if(d){var g=d._owner,D=za(d.type,d._source,g?g.type:null);Cn(D)}else Cn(null)}var vu;vu=!1;function hu(){if(Fe.current){var d=Dn(Fe.current.type);if(d)return`
+
+Check the render method of \``+d+"`."}return""}function ye(d){if(d!==void 0){var g=d.fileName.replace(/^.*[\\\/]/,""),D=d.lineNumber;return`
+
+Check your code at `+g+":"+D+"."}return""}function ns(d){return d!=null?ye(d.__source):""}var ln={};function yi(d){var g=hu();if(!g){var D=typeof d=="string"?d:d.displayName||d.name;D&&(g=`
+
+Check the top-level render call using <`+D+">.")}return g}function Fa(d,g){if(!(!d._store||d._store.validated||d.key!=null)){d._store.validated=!0;var D=yi(g);if(!ln[D]){ln[D]=!0;var O="";d&&d._owner&&d._owner!==Fe.current&&(O=" It was passed a child from "+Dn(d._owner.type)+"."),ze(d),be('Each child in a list should have a unique "key" prop.%s%s See https://reactjs.org/link/warning-keys for more information.',D,O),ze(null)}}}function rs(d,g){if(typeof d=="object"){if(Ct(d))for(var D=0;D",P=" Did you accidentally export a JSX literal instead of a component?"):Z=typeof d,be("React.createElement: type is invalid -- expected a string (for built-in components) or a class/function (for composite components) but got: %s.%s",Z,P)}var pe=re.apply(this,arguments);if(pe==null)return pe;if(O)for(var Le=2;Le10&>("Detected a large number of updates inside startTransition. If this is due to a subscription please re-write it to use React provided hooks. Otherwise concurrent mode guarantees are off the table."),O._updatedFibers.clear()}}}var to=!1,gi=null;function is(d){if(gi===null)try{var g=("require"+Math.random()).slice(0,7),D=l&&l[g];gi=D.call(l,"timers").setImmediate}catch{gi=function(P){to===!1&&(to=!0,typeof MessageChannel>"u"&&be("This browser does not have a MessageChannel implementation, so enqueuing tasks via await act(async () => ...) will fail. 
Please file an issue at https://github.com/facebook/react/issues if you encounter this warning."));var le=new MessageChannel;le.port1.onmessage=P,le.port2.postMessage(void 0)}}return gi(d)}var Ha=0,os=!1;function us(d){{var g=Ha;Ha++,de.current===null&&(de.current=[]);var D=de.isBatchingLegacy,O;try{if(de.isBatchingLegacy=!0,O=d(),!D&&de.didScheduleLegacyUpdate){var P=de.current;P!==null&&(de.didScheduleLegacyUpdate=!1,ro(P))}}catch(ut){throw la(g),ut}finally{de.isBatchingLegacy=D}if(O!==null&&typeof O=="object"&&typeof O.then=="function"){var le=O,Z=!1,pe={then:function(ut,pt){Z=!0,le.then(function(Be){la(g),Ha===0?no(Be,ut,pt):ut(Be)},function(Be){la(g),pt(Be)})}};return!os&&typeof Promise<"u"&&Promise.resolve().then(function(){}).then(function(){Z||(os=!0,be("You called act(async () => ...) without await. This could lead to unexpected testing behaviour, interleaving multiple act calls and mixing their scopes. You should - await act(async () => ...);"))}),pe}else{var Le=O;if(la(g),Ha===0){var Ie=de.current;Ie!==null&&(ro(Ie),de.current=null);var et={then:function(ut,pt){de.current===null?(de.current=[],no(Le,ut,pt)):ut(Le)}};return et}else{var tt={then:function(ut,pt){ut(Le)}};return tt}}}}function la(d){d!==Ha-1&&be("You seem to have overlapping act() calls, this is not supported. Be sure to await previous act() calls before making a new one. "),Ha=d}function no(d,g,D){{var O=de.current;if(O!==null)try{ro(O),is(function(){O.length===0?(de.current=null,g(d)):no(d,g,D)})}catch(P){D(P)}else g(d)}}var ja=!1;function ro(d){if(!ja){ja=!0;var g=0;try{for(;g1?j-1:0),oe=1;oe=1&&st>=0&&Se[we]!==ct[st];)st--;for(;we>=1&&st>=0;we--,st--)if(Se[we]!==ct[st]){if(we!==1||st!==1)do if(we--,st--,st<0||Se[we]!==ct[st]){var un=`
+`+Se[we].replace(" at new "," at ");return b.displayName&&un.includes("")&&(un=un.replace("",b.displayName)),typeof b=="function"&&xn.set(b,un),un}while(we>=1&&st>=0);break}}}finally{Wt=!1,mr.current=fe,Vr(),Error.prepareStackTrace=Ae}var Er=b?b.displayName||b.name:"",Rr=Er?tr(Er):"";return typeof b=="function"&&xn.set(b,Rr),Rr}function Ct(b,j,G){return Vn(b,!1)}function an(b){var j=b.prototype;return!!(j&&j.isReactComponent)}function jt(b,j,G){if(b==null)return"";if(typeof b=="function")return Vn(b,an(b));if(typeof b=="string")return tr(b);switch(b){case U:return tr("Suspense");case M:return tr("SuspenseList")}if(typeof b=="object")switch(b.$$typeof){case z:return Ct(b.render);case F:return jt(b.type,j,G);case ee:{var oe=b,Ae=oe._payload,fe=oe._init;try{return jt(fe(Ae),j,G)}catch{}}}return""}var wt=Object.prototype.hasOwnProperty,Ot={},Bn=Ye.ReactDebugCurrentFrame;function nr(b){if(b){var j=b._owner,G=jt(b.type,b._source,j?j.type:null);Bn.setExtraStackFrame(G)}else Bn.setExtraStackFrame(null)}function Dn(b,j,G,oe,Ae){{var fe=Function.call.bind(wt);for(var Te in b)if(fe(b,Te)){var Se=void 0;try{if(typeof b[Te]!="function"){var ct=Error((oe||"React class")+": "+G+" type `"+Te+"` is invalid; it must be a function, usually from the `prop-types` package, but received `"+typeof b[Te]+"`.This often happens because of typos such as `PropTypes.function` instead of `PropTypes.func`.");throw ct.name="Invariant Violation",ct}Se=b[Te](j,Te,oe,G,null,"SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED")}catch(we){Se=we}Se&&!(Se instanceof Error)&&(nr(Ae),Ce("%s: type specification of %s `%s` is invalid; the type checker function must return `null` or an `Error` but returned a %s. 
You may have forgotten to pass an argument to the type checker creator (arrayOf, instanceOf, objectOf, oneOf, oneOfType, and shape all require an argument).",oe||"React class",G,Te,typeof Se),nr(null)),Se instanceof Error&&!(Se.message in Ot)&&(Ot[Se.message]=!0,nr(Ae),Ce("Failed %s type: %s",G,Se.message),nr(null))}}}var gr=Array.isArray;function rr(b){return gr(b)}function hn(b){{var j=typeof Symbol=="function"&&Symbol.toStringTag,G=j&&b[Symbol.toStringTag]||b.constructor.name||"Object";return G}}function ar(b){try{return on(b),!1}catch{return!0}}function on(b){return""+b}function Pn(b){if(ar(b))return Ce("The provided key is an unsupported type %s. This value must be coerced to a string before before using it here.",hn(b)),on(b)}var bt=Ye.ReactCurrentOwner,ir={key:!0,ref:!0,__self:!0,__source:!0},ra,aa,q;q={};function re(b){if(wt.call(b,"ref")){var j=Object.getOwnPropertyDescriptor(b,"ref").get;if(j&&j.isReactWarning)return!1}return b.ref!==void 0}function De(b){if(wt.call(b,"key")){var j=Object.getOwnPropertyDescriptor(b,"key").get;if(j&&j.isReactWarning)return!1}return b.key!==void 0}function qe(b,j){if(typeof b.ref=="string"&&bt.current&&j&&bt.current.stateNode!==j){var G=xe(bt.current.type);q[G]||(Ce('Component "%s" contains the string ref "%s". Support for string refs will be removed in a future major release. This case cannot be automatically converted to an arrow function. We ask you to manually fix this case by using useRef() or createRef() instead. Learn more about using refs safely here: https://reactjs.org/link/strict-mode-string-ref',xe(bt.current.type),b.ref),q[G]=!0)}}function Ge(b,j){{var G=function(){ra||(ra=!0,Ce("%s: `key` is not a prop. Trying to access it will result in `undefined` being returned. If you need to access the same value within the child component, you should pass it as a different prop. 
(https://reactjs.org/link/special-props)",j))};G.isReactWarning=!0,Object.defineProperty(b,"key",{get:G,configurable:!0})}}function _t(b,j){{var G=function(){aa||(aa=!0,Ce("%s: `ref` is not a prop. Trying to access it will result in `undefined` being returned. If you need to access the same value within the child component, you should pass it as a different prop. (https://reactjs.org/link/special-props)",j))};G.isReactWarning=!0,Object.defineProperty(b,"ref",{get:G,configurable:!0})}}var St=function(b,j,G,oe,Ae,fe,Te){var Se={$$typeof:s,type:b,key:j,ref:G,props:Te,_owner:fe};return Se._store={},Object.defineProperty(Se._store,"validated",{configurable:!1,enumerable:!1,writable:!0,value:!1}),Object.defineProperty(Se,"_self",{configurable:!1,enumerable:!1,writable:!1,value:oe}),Object.defineProperty(Se,"_source",{configurable:!1,enumerable:!1,writable:!1,value:Ae}),Object.freeze&&(Object.freeze(Se.props),Object.freeze(Se)),Se};function wn(b,j,G,oe,Ae){{var fe,Te={},Se=null,ct=null;G!==void 0&&(Pn(G),Se=""+G),De(j)&&(Pn(j.key),Se=""+j.key),re(j)&&(ct=j.ref,qe(j,Ae));for(fe in j)wt.call(j,fe)&&!ir.hasOwnProperty(fe)&&(Te[fe]=j[fe]);if(b&&b.defaultProps){var we=b.defaultProps;for(fe in we)Te[fe]===void 0&&(Te[fe]=we[fe])}if(Se||ct){var st=typeof b=="function"?b.displayName||b.name||"Unknown":b;Se&&Ge(Te,st),ct&&_t(Te,st)}return St(b,Se,ct,Ae,oe,bt.current,Te)}}var Ze=Ye.ReactCurrentOwner,$n=Ye.ReactDebugCurrentFrame;function it(b){if(b){var j=b._owner,G=jt(b.type,b._source,j?j.type:null);$n.setExtraStackFrame(G)}else $n.setExtraStackFrame(null)}var ot;ot=!1;function Br(b){return typeof b=="object"&&b!==null&&b.$$typeof===s}function br(){{if(Ze.current){var b=xe(Ze.current.type);if(b)return`
+
+Check the render method of \``+b+"`."}return""}}function fi(b){{if(b!==void 0){var j=b.fileName.replace(/^.*[\\\/]/,""),G=b.lineNumber;return`
+
+Check your code at `+j+":"+G+"."}return""}}var Ki={};function tu(b){{var j=br();if(!j){var G=typeof b=="string"?b:b.displayName||b.name;G&&(j=`
+
+Check the top-level render call using <`+G+">.")}return j}}function di(b,j){{if(!b._store||b._store.validated||b.key!=null)return;b._store.validated=!0;var G=tu(j);if(Ki[G])return;Ki[G]=!0;var oe="";b&&b._owner&&b._owner!==Ze.current&&(oe=" It was passed a child from "+xe(b._owner.type)+"."),it(b),Ce('Each child in a list should have a unique "key" prop.%s%s See https://reactjs.org/link/warning-keys for more information.',G,oe),it(null)}}function pi(b,j){{if(typeof b!="object")return;if(rr(b))for(var G=0;G",Se=" Did you accidentally export a JSX literal instead of a component?"):we=typeof b,Ce("React.jsx: type is invalid -- expected a string (for built-in components) or a class/function (for composite components) but got: %s.%s",we,Se)}var st=wn(b,j,G,Ae,fe);if(st==null)return st;if(Te){var un=j.children;if(un!==void 0)if(oe)if(rr(un)){for(var Er=0;Er0?"{key: someKey, "+Vt.join(": ..., ")+": ...}":"{key: someKey}";if(!oa[Rr+vi]){var Ji=Vt.length>0?"{"+Vt.join(": ..., ")+": ...}":"{}";Ce(`A props object containing a "key" prop is being spread into JSX:
+ let props = %s;
+ <%s {...props} />
+React keys must be passed directly to JSX without using spread:
+ let props = %s;
+ <%s key={someKey} {...props} />`,vi,Rr,Ji,Rr),oa[Rr+vi]=!0}}return b===v?Ma(st):ia(st),st}}var Sr=Pr;Uh.Fragment=v,Uh.jsxDEV=Sr})();SE.exports=Uh;var kn=SE.exports,Nh={},RE={exports:{}},Zn={},CE={exports:{}},TE={};(function(l){/**
+ * @license React
+ * scheduler.development.js
+ *
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */(function(){typeof __REACT_DEVTOOLS_GLOBAL_HOOK__<"u"&&typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.registerInternalModuleStart=="function"&&__REACT_DEVTOOLS_GLOBAL_HOOK__.registerInternalModuleStart(new Error);var s=!1,p=!1,v=5;function y(q,re){var De=q.length;q.push(re),N(q,re,De)}function S(q){return q.length===0?null:q[0]}function f(q){if(q.length===0)return null;var re=q[0],De=q.pop();return De!==re&&(q[0]=De,z(q,De,0)),re}function N(q,re,De){for(var qe=De;qe>0;){var Ge=qe-1>>>1,_t=q[Ge];if(U(_t,re)>0)q[Ge]=re,q[qe]=_t,qe=Ge;else return}}function z(q,re,De){for(var qe=De,Ge=q.length,_t=Ge>>>1;qe<_t;){var St=(qe+1)*2-1,wn=q[St],Ze=St+1,$n=q[Ze];if(U(wn,re)<0)ZeDe&&(!q||nr()));){var qe=xe.callback;if(typeof qe=="function"){xe.callback=null,Ve=xe.priorityLevel;var Ge=xe.expirationTime<=De,_t=qe(Ge);De=l.unstable_now(),typeof _t=="function"?xe.callback=_t:xe===S(Rt)&&f(Rt),hr(De)}else f(Rt);xe=S(Rt)}if(xe!==null)return!0;var St=S(rt);return St!==null&&bt(Ht,St.startTime-De),!1}function Vr(q,re){switch(q){case M:case F:case ee:case $:case B:break;default:q=ee}var De=Ve;Ve=q;try{return re()}finally{Ve=De}}function mr(q){var re;switch(Ve){case M:case F:case ee:re=ee;break;default:re=Ve;break}var De=Ve;Ve=re;try{return q()}finally{Ve=De}}function yr(q){var re=Ve;return function(){var De=Ve;Ve=re;try{return q.apply(this,arguments)}finally{Ve=De}}}function tr(q,re,De){var qe=l.unstable_now(),Ge;if(typeof De=="object"&&De!==null){var _t=De.delay;typeof _t=="number"&&_t>0?Ge=qe+_t:Ge=qe}else Ge=qe;var St;switch(q){case M:St=de;break;case F:St=Fe;break;case B:St=Cn;break;case $:St=Gt;break;case ee:default:St=ge;break}var wn=Ge+St,Ze={id:Fn++,callback:re,priorityLevel:q,startTime:Ge,expirationTime:wn,sortIndex:-1};return Ge>qe?(Ze.sortIndex=Ge,y(rt,Ze),S(Rt)===null&&Ze===S(rt)&&(be?ir():be=!0,bt(Ht,Ge-qe))):(Ze.sortIndex=wn,y(Rt,Ze),!gt&&!yt&&(gt=!0,Pn(Tn))),Ze}function Wt(){}function xn(){!gt&&!yt&&(gt=!0,Pn(Tn))}function jn(){return S(Rt)}function 
Vn(q){q.callback=null}function Ct(){return Ve}var an=!1,jt=null,wt=-1,Ot=v,Bn=-1;function nr(){var q=l.unstable_now()-Bn;return!(q125){console.error("forceFrameRate takes a positive int between 0 and 125, forcing frame rates higher than 125 fps is not supported");return}q>0?Ot=Math.floor(1e3/q):Ot=v}var rr=function(){if(jt!==null){var q=l.unstable_now();Bn=q;var re=!0,De=!0;try{De=jt(re,q)}finally{De?hn():(an=!1,jt=null)}}else an=!1},hn;if(typeof er=="function")hn=function(){er(rr)};else if(typeof MessageChannel<"u"){var ar=new MessageChannel,on=ar.port2;ar.port1.onmessage=rr,hn=function(){on.postMessage(null)}}else hn=function(){It(rr,0)};function Pn(q){jt=q,an||(an=!0,hn())}function bt(q,re){wt=It(function(){q(l.unstable_now())},re)}function ir(){jr(wt),wt=-1}var ra=Dn,aa=null;l.unstable_IdlePriority=B,l.unstable_ImmediatePriority=M,l.unstable_LowPriority=$,l.unstable_NormalPriority=ee,l.unstable_Profiling=aa,l.unstable_UserBlockingPriority=F,l.unstable_cancelCallback=Vn,l.unstable_continueExecution=xn,l.unstable_forceFrameRate=gr,l.unstable_getCurrentPriorityLevel=Ct,l.unstable_getFirstCallbackNode=jn,l.unstable_next=mr,l.unstable_pauseExecution=Wt,l.unstable_requestPaint=ra,l.unstable_runWithPriority=Vr,l.unstable_scheduleCallback=tr,l.unstable_shouldYield=nr,l.unstable_wrapCallback=yr,typeof __REACT_DEVTOOLS_GLOBAL_HOOK__<"u"&&typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.registerInternalModuleStop=="function"&&__REACT_DEVTOOLS_GLOBAL_HOOK__.registerInternalModuleStop(new Error)})()})(TE);CE.exports=TE;var pA=CE.exports;/**
+ * @license React
+ * react-dom.development.js
+ *
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */(function(){typeof __REACT_DEVTOOLS_GLOBAL_HOOK__<"u"&&typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.registerInternalModuleStart=="function"&&__REACT_DEVTOOLS_GLOBAL_HOOK__.registerInternalModuleStart(new Error);var l=Qi,s=pA,p=l.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED,v=!1;function y(e){v=e}function S(e){if(!v){for(var t=arguments.length,n=new Array(t>1?t-1:0),r=1;r1?t-1:0),r=1;r2&&(e[0]==="o"||e[0]==="O")&&(e[1]==="n"||e[1]==="N")}function wn(e,t,n,r){if(n!==null&&n.type===ar)return!1;switch(typeof t){case"function":case"symbol":return!0;case"boolean":{if(r)return!1;if(n!==null)return!n.acceptsBooleans;var a=e.toLowerCase().slice(0,5);return a!=="data-"&&a!=="aria-"}default:return!1}}function Ze(e,t,n,r){if(t===null||typeof t>"u"||wn(e,t,n,r))return!0;if(r)return!1;if(n!==null)switch(n.type){case bt:return!t;case ir:return t===!1;case ra:return isNaN(t);case aa:return isNaN(t)||t<1}return!1}function $n(e){return ot.hasOwnProperty(e)?ot[e]:null}function it(e,t,n,r,a,i,o){this.acceptsBooleans=t===Pn||t===bt||t===ir,this.attributeName=r,this.attributeNamespace=a,this.mustUseProperty=n,this.propertyName=e,this.type=t,this.sanitizeURL=i,this.removeEmptyString=o}var ot={},Br=["children","dangerouslySetInnerHTML","defaultValue","defaultChecked","innerHTML","suppressContentEditableWarning","suppressHydrationWarning","style"];Br.forEach(function(e){ot[e]=new it(e,ar,!1,e,null,!1,!1)}),[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach(function(e){var t=e[0],n=e[1];ot[t]=new it(t,on,!1,n,null,!1,!1)}),["contentEditable","draggable","spellCheck","value"].forEach(function(e){ot[e]=new it(e,Pn,!1,e.toLowerCase(),null,!1,!1)}),["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach(function(e){ot[e]=new 
it(e,Pn,!1,e,null,!1,!1)}),["allowFullScreen","async","autoFocus","autoPlay","controls","default","defer","disabled","disablePictureInPicture","disableRemotePlayback","formNoValidate","hidden","loop","noModule","noValidate","open","playsInline","readOnly","required","reversed","scoped","seamless","itemScope"].forEach(function(e){ot[e]=new it(e,bt,!1,e.toLowerCase(),null,!1,!1)}),["checked","multiple","muted","selected"].forEach(function(e){ot[e]=new it(e,bt,!0,e,null,!1,!1)}),["capture","download"].forEach(function(e){ot[e]=new it(e,ir,!1,e,null,!1,!1)}),["cols","rows","size","span"].forEach(function(e){ot[e]=new it(e,aa,!1,e,null,!1,!1)}),["rowSpan","start"].forEach(function(e){ot[e]=new it(e,ra,!1,e.toLowerCase(),null,!1,!1)});var br=/[\-\:]([a-z])/g,fi=function(e){return e[1].toUpperCase()};["accent-height","alignment-baseline","arabic-form","baseline-shift","cap-height","clip-path","clip-rule","color-interpolation","color-interpolation-filters","color-profile","color-rendering","dominant-baseline","enable-background","fill-opacity","fill-rule","flood-color","flood-opacity","font-family","font-size","font-size-adjust","font-stretch","font-style","font-variant","font-weight","glyph-name","glyph-orientation-horizontal","glyph-orientation-vertical","horiz-adv-x","horiz-origin-x","image-rendering","letter-spacing","lighting-color","marker-end","marker-mid","marker-start","overline-position","overline-thickness","paint-order","panose-1","pointer-events","rendering-intent","shape-rendering","stop-color","stop-opacity","strikethrough-position","strikethrough-thickness","stroke-dasharray","stroke-dashoffset","stroke-linecap","stroke-linejoin","stroke-miterlimit","stroke-opacity","stroke-width","text-anchor","text-decoration","text-rendering","underline-position","underline-thickness","unicode-bidi","unicode-range","units-per-em","v-alphabetic","v-hanging","v-ideographic","v-mathematical","vector-effect","vert-adv-y","vert-origin-x","vert-origin-y","word-spacing","writing
-mode","xmlns:xlink","x-height"].forEach(function(e){var t=e.replace(br,fi);ot[t]=new it(t,on,!1,e,null,!1,!1)}),["xlink:actuate","xlink:arcrole","xlink:role","xlink:show","xlink:title","xlink:type"].forEach(function(e){var t=e.replace(br,fi);ot[t]=new it(t,on,!1,e,"http://www.w3.org/1999/xlink",!1,!1)}),["xml:base","xml:lang","xml:space"].forEach(function(e){var t=e.replace(br,fi);ot[t]=new it(t,on,!1,e,"http://www.w3.org/XML/1998/namespace",!1,!1)}),["tabIndex","crossOrigin"].forEach(function(e){ot[e]=new it(e,on,!1,e.toLowerCase(),null,!1,!1)});var Ki="xlinkHref";ot[Ki]=new it("xlinkHref",on,!1,"xlink:href","http://www.w3.org/1999/xlink",!0,!1),["src","href","action","formAction"].forEach(function(e){ot[e]=new it(e,on,!1,e.toLowerCase(),null,!0,!0)});var tu=/^[\u0000-\u001F ]*j[\r\n\t]*a[\r\n\t]*v[\r\n\t]*a[\r\n\t]*s[\r\n\t]*c[\r\n\t]*r[\r\n\t]*i[\r\n\t]*p[\r\n\t]*t[\r\n\t]*\:/i,di=!1;function pi(e){!di&&tu.test(e)&&(di=!0,f("A future version of React will block javascript: URLs as a security precaution. Use event handlers instead if you can. If you need to generate unsafe HTML try using dangerouslySetInnerHTML instead. 
React was passed %s.",JSON.stringify(e)))}function ia(e,t,n,r){if(r.mustUseProperty){var a=r.propertyName;return e[a]}else{Bn(n,t),r.sanitizeURL&&pi(""+n);var i=r.attributeName,o=null;if(r.type===ir){if(e.hasAttribute(i)){var u=e.getAttribute(i);return u===""?!0:Ze(t,n,r,!1)?u:u===""+n?n:u}}else if(e.hasAttribute(i)){if(Ze(t,n,r,!1))return e.getAttribute(i);if(r.type===bt)return n;o=e.getAttribute(i)}return Ze(t,n,r,!1)?o===null?n:o:o===""+n?n:o}}function Ma(e,t,n,r){{if(!_t(t))return;if(!e.hasAttribute(t))return n===void 0?void 0:null;var a=e.getAttribute(t);return Bn(n,t),a===""+n?n:a}}function oa(e,t,n,r){var a=$n(t);if(!St(t,a,r)){if(Ze(t,n,a,r)&&(n=null),r||a===null){if(_t(t)){var i=t;n===null?e.removeAttribute(i):(Bn(n,t),e.setAttribute(i,""+n))}return}var o=a.mustUseProperty;if(o){var u=a.propertyName;if(n===null){var c=a.type;e[u]=c===bt?!1:""}else e[u]=n;return}var h=a.attributeName,m=a.attributeNamespace;if(n===null)e.removeAttribute(h);else{var R=a.type,E;R===bt||R===ir&&n===!0?E="":(Bn(n,h),E=""+n,a.sanitizeURL&&pi(E.toString())),m?e.setAttributeNS(m,h,E):e.setAttribute(h,E)}}}var Pr=Symbol.for("react.element"),Sr=Symbol.for("react.portal"),b=Symbol.for("react.fragment"),j=Symbol.for("react.strict_mode"),G=Symbol.for("react.profiler"),oe=Symbol.for("react.provider"),Ae=Symbol.for("react.context"),fe=Symbol.for("react.forward_ref"),Te=Symbol.for("react.suspense"),Se=Symbol.for("react.suspense_list"),ct=Symbol.for("react.memo"),we=Symbol.for("react.lazy"),st=Symbol.for("react.scope"),un=Symbol.for("react.debug_trace_mode"),Er=Symbol.for("react.offscreen"),Rr=Symbol.for("react.legacy_hidden"),Vt=Symbol.for("react.cache"),vi=Symbol.for("react.tracing_marker"),Ji=Symbol.iterator,nu="@@iterator";function ua(e){if(e===null||typeof e!="object")return null;var t=Ji&&e[Ji]||e[nu];return typeof t=="function"?t:null}var _e=Object.assign,hi=0,Ua,ru,au,iu,ou,uu,lu;function su(){}su.__reactDisabledLog=!0;function 
Jl(){{if(hi===0){Ua=console.log,ru=console.info,au=console.warn,iu=console.error,ou=console.group,uu=console.groupCollapsed,lu=console.groupEnd;var e={configurable:!0,enumerable:!0,value:su,writable:!0};Object.defineProperties(console,{info:e,log:e,warn:e,error:e,group:e,groupCollapsed:e,groupEnd:e})}hi++}}function Cf(){{if(hi--,hi===0){var e={configurable:!0,enumerable:!0,writable:!0};Object.defineProperties(console,{log:_e({},e,{value:Ua}),info:_e({},e,{value:ru}),warn:_e({},e,{value:au}),error:_e({},e,{value:iu}),group:_e({},e,{value:ou}),groupCollapsed:_e({},e,{value:uu}),groupEnd:_e({},e,{value:lu})})}hi<0&&f("disabledDepth fell below zero. This is a bug in React. Please file an issue.")}}var cu=p.ReactCurrentDispatcher,mi;function or(e,t,n){{if(mi===void 0)try{throw Error()}catch(a){var r=a.stack.trim().match(/\n( *(at )?)/);mi=r&&r[1]||""}return`
+`+mi+e}}var Na=!1,ka;{var Zi=typeof WeakMap=="function"?WeakMap:Map;ka=new Zi}function fu(e,t){if(!e||Na)return"";{var n=ka.get(e);if(n!==void 0)return n}var r;Na=!0;var a=Error.prepareStackTrace;Error.prepareStackTrace=void 0;var i;i=cu.current,cu.current=null,Jl();try{if(t){var o=function(){throw Error()};if(Object.defineProperty(o.prototype,"props",{set:function(){throw Error()}}),typeof Reflect=="object"&&Reflect.construct){try{Reflect.construct(o,[])}catch(w){r=w}Reflect.construct(e,[],o)}else{try{o.call()}catch(w){r=w}e.call(o.prototype)}}else{try{throw Error()}catch(w){r=w}e()}}catch(w){if(w&&r&&typeof w.stack=="string"){for(var u=w.stack.split(`
+`),c=r.stack.split(`
+`),h=u.length-1,m=c.length-1;h>=1&&m>=0&&u[h]!==c[m];)m--;for(;h>=1&&m>=0;h--,m--)if(u[h]!==c[m]){if(h!==1||m!==1)do if(h--,m--,m<0||u[h]!==c[m]){var R=`
+`+u[h].replace(" at new "," at ");return e.displayName&&R.includes("")&&(R=R.replace("",e.displayName)),typeof e=="function"&&ka.set(e,R),R}while(h>=1&&m>=0);break}}}finally{Na=!1,cu.current=i,Cf(),Error.prepareStackTrace=a}var E=e?e.displayName||e.name:"",x=E?or(E):"";return typeof e=="function"&&ka.set(e,x),x}function Zl(e,t,n){return fu(e,!0)}function du(e,t,n){return fu(e,!1)}function Tf(e){var t=e.prototype;return!!(t&&t.isReactComponent)}function za(e,t,n){if(e==null)return"";if(typeof e=="function")return fu(e,Tf(e));if(typeof e=="string")return or(e);switch(e){case Te:return or("Suspense");case Se:return or("SuspenseList")}if(typeof e=="object")switch(e.$$typeof){case fe:return du(e.render);case ct:return za(e.type,t,n);case we:{var r=e,a=r._payload,i=r._init;try{return za(i(a),t,n)}catch{}}}return""}function es(e){switch(e._debugOwner&&e._debugOwner.type,e._debugSource,e.tag){case $:return or(e.type);case Gt:return or("Lazy");case de:return or("Suspense");case rt:return or("SuspenseList");case z:case M:case ge:return du(e.type);case ne:return du(e.type.render);case U:return Zl(e.type);default:return""}}function pu(e){try{var t="",n=e;do t+=es(n),n=n.return;while(n);return t}catch(r){return`
+Error generating stack: `+r.message+`
+`+r.stack}}function eo(e,t,n){var r=e.displayName;if(r)return r;var a=t.displayName||t.name||"";return a!==""?n+"("+a+")":n}function ts(e){return e.displayName||"Context"}function ze(e){if(e==null)return null;if(typeof e.tag=="number"&&f("Received an unexpected object in getComponentNameFromType(). This is likely a bug in React. Please file an issue."),typeof e=="function")return e.displayName||e.name||null;if(typeof e=="string")return e;switch(e){case b:return"Fragment";case Sr:return"Portal";case G:return"Profiler";case j:return"StrictMode";case Te:return"Suspense";case Se:return"SuspenseList"}if(typeof e=="object")switch(e.$$typeof){case Ae:var t=e;return ts(t)+".Consumer";case oe:var n=e;return ts(n._context)+".Provider";case fe:return eo(e,e.render,"ForwardRef");case ct:var r=e.displayName||null;return r!==null?r:ze(e.type)||"Memo";case we:{var a=e,i=a._payload,o=a._init;try{return ze(o(i))}catch{return null}}}return null}function vu(e,t,n){var r=t.displayName||t.name||"";return e.displayName||(r!==""?n+"("+r+")":n)}function hu(e){return e.displayName||"Context"}function ye(e){var t=e.tag,n=e.type;switch(t){case yt:return"Cache";case Ye:var r=n;return hu(r)+".Consumer";case Ce:var a=n;return hu(a._context)+".Provider";case Rt:return"DehydratedFragment";case ne:return vu(n,n.render,"ForwardRef");case ce:return"Fragment";case $:return n;case ee:return"Portal";case F:return"Root";case B:return"Text";case Gt:return ze(n);case Je:return n===j?"StrictMode":"Mode";case xe:return"Offscreen";case Ke:return"Profiler";case Fn:return"Scope";case de:return"Suspense";case rt:return"SuspenseList";case gt:return"TracingMarker";case U:case z:case Cn:case M:case Fe:case ge:if(typeof n=="function")return n.displayName||n.name||null;if(typeof n=="string")return n;break}return null}var ns=p.ReactDebugCurrentFrame,ln=null,yi=!1;function Fa(){{if(ln===null)return null;var e=ln._debugOwner;if(e!==null&&typeof e<"u")return ye(e)}return null}function rs(){return 
ln===null?"":pu(ln)}function Bt(){ns.getCurrentStack=null,ln=null,yi=!1}function ft(e){ns.getCurrentStack=e===null?null:rs,ln=e,yi=!1}function as(){return ln}function Yn(e){yi=e}function On(e){return""+e}function $r(e){switch(typeof e){case"boolean":case"number":case"string":case"undefined":return e;case"object":return hn(e),e;default:return""}}var xf={button:!0,checkbox:!0,image:!0,hidden:!0,radio:!0,reset:!0,submit:!0};function to(e,t){xf[t.type]||t.onChange||t.onInput||t.readOnly||t.disabled||t.value==null||f("You provided a `value` prop to a form field without an `onChange` handler. This will render a read-only field. If the field should be mutable use `defaultValue`. Otherwise, set either `onChange` or `readOnly`."),t.onChange||t.readOnly||t.disabled||t.checked==null||f("You provided a `checked` prop to a form field without an `onChange` handler. This will render a read-only field. If the field should be mutable use `defaultChecked`. Otherwise, set either `onChange` or `readOnly`.")}function gi(e){var t=e.type,n=e.nodeName;return n&&n.toLowerCase()==="input"&&(t==="checkbox"||t==="radio")}function is(e){return e._valueTracker}function Ha(e){e._valueTracker=null}function os(e){var t="";return e&&(gi(e)?t=e.checked?"true":"false":t=e.value),t}function us(e){var t=gi(e)?"checked":"value",n=Object.getOwnPropertyDescriptor(e.constructor.prototype,t);hn(e[t]);var r=""+e[t];if(!(e.hasOwnProperty(t)||typeof n>"u"||typeof n.get!="function"||typeof n.set!="function")){var a=n.get,i=n.set;Object.defineProperty(e,t,{configurable:!0,get:function(){return a.call(this)},set:function(u){hn(u),r=""+u,i.call(this,u)}}),Object.defineProperty(e,t,{enumerable:n.enumerable});var o={getValue:function(){return r},setValue:function(u){hn(u),r=""+u},stopTracking:function(){Ha(e),delete e[t]}};return o}}function la(e){is(e)||(e._valueTracker=us(e))}function no(e){if(!e)return!1;var t=is(e);if(!t)return!0;var n=t.getValue(),r=os(e);return r!==n?(t.setValue(r),!0):!1}function 
ja(e){if(e=e||(typeof document<"u"?document:void 0),typeof e>"u")return null;try{return e.activeElement||e.body}catch{return e.body}}var ro=!1,ls=!1,ss=!1,cs=!1;function fs(e){var t=e.type==="checkbox"||e.type==="radio";return t?e.checked!=null:e.value!=null}function d(e,t){var n=e,r=t.checked,a=_e({},t,{defaultChecked:void 0,defaultValue:void 0,value:void 0,checked:r??n._wrapperState.initialChecked});return a}function g(e,t){to("input",t),t.checked!==void 0&&t.defaultChecked!==void 0&&!ls&&(f("%s contains an input of type %s with both checked and defaultChecked props. Input elements must be either controlled or uncontrolled (specify either the checked prop, or the defaultChecked prop, but not both). Decide between using a controlled or uncontrolled input element and remove one of these props. More info: https://reactjs.org/link/controlled-components",Fa()||"A component",t.type),ls=!0),t.value!==void 0&&t.defaultValue!==void 0&&!ro&&(f("%s contains an input of type %s with both value and defaultValue props. Input elements must be either controlled or uncontrolled (specify either the value prop, or the defaultValue prop, but not both). Decide between using a controlled or uncontrolled input element and remove one of these props. More info: https://reactjs.org/link/controlled-components",Fa()||"A component",t.type),ro=!0);var n=e,r=t.defaultValue==null?"":t.defaultValue;n._wrapperState={initialChecked:t.checked!=null?t.checked:t.defaultChecked,initialValue:$r(t.value!=null?t.value:r),controlled:fs(t)}}function D(e,t){var n=e,r=t.checked;r!=null&&oa(n,"checked",r,!1)}function O(e,t){var n=e;{var r=fs(t);!n._wrapperState.controlled&&r&&!cs&&(f("A component is changing an uncontrolled input to be controlled. This is likely caused by the value changing from undefined to a defined value, which should not happen. Decide between using a controlled or uncontrolled input element for the lifetime of the component. 
More info: https://reactjs.org/link/controlled-components"),cs=!0),n._wrapperState.controlled&&!r&&!ss&&(f("A component is changing a controlled input to be uncontrolled. This is likely caused by the value changing from a defined to undefined, which should not happen. Decide between using a controlled or uncontrolled input element for the lifetime of the component. More info: https://reactjs.org/link/controlled-components"),ss=!0)}D(e,t);var a=$r(t.value),i=t.type;if(a!=null)i==="number"?(a===0&&n.value===""||n.value!=a)&&(n.value=On(a)):n.value!==On(a)&&(n.value=On(a));else if(i==="submit"||i==="reset"){n.removeAttribute("value");return}t.hasOwnProperty("value")?pe(n,t.type,a):t.hasOwnProperty("defaultValue")&&pe(n,t.type,$r(t.defaultValue)),t.checked==null&&t.defaultChecked!=null&&(n.defaultChecked=!!t.defaultChecked)}function P(e,t,n){var r=e;if(t.hasOwnProperty("value")||t.hasOwnProperty("defaultValue")){var a=t.type,i=a==="submit"||a==="reset";if(i&&(t.value===void 0||t.value===null))return;var o=On(r._wrapperState.initialValue);n||o!==r.value&&(r.value=o),r.defaultValue=o}var u=r.name;u!==""&&(r.name=""),r.defaultChecked=!r.defaultChecked,r.defaultChecked=!!r._wrapperState.initialChecked,u!==""&&(r.name=u)}function le(e,t){var n=e;O(n,t),Z(n,t)}function Z(e,t){var n=t.name;if(t.type==="radio"&&n!=null){for(var r=e;r.parentNode;)r=r.parentNode;Bn(n,"name");for(var a=r.querySelectorAll("input[name="+JSON.stringify(""+n)+'][type="radio"]'),i=0;i.")))}):t.dangerouslySetInnerHTML!=null&&(et||(et=!0,f("Pass a `value` prop if you set dangerouslyInnerHTML so React knows which value should be selected.")))),t.selected!=null&&!Le&&(f("Use the `defaultValue` or `value` props on