diff --git a/README.md b/README.md
index e4d31503..4e51ca9b 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,7 @@
+
+
+
+
# CloudHarness
CloudHarness is a base infrastructure facilitator for micro-service based applications deployed on Kubernetes.
@@ -12,6 +16,16 @@ What building your cluster application with CloudHarness gives to you:
* Submit batch and asynchronous workflows - based on Argo
* Orchestrate Micro-services - based on Kafka
+## Command line tools
+
+CloudHarness provides the following command line tools to help with application scaffolding and deployment; a typical workflow is sketched after the list.
+
+* `harness-deployment` - generate the Helm chart to deploy on Kubernetes.
+* `harness-application` - create a new CloudHarness REST application.
+* `harness-codefresh` - generate the Codefresh continuous deployment specification.
+* `harness-generate` - generate server and client code for all CloudHarness REST applications.
+
+
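+A minimal sketch of a typical workflow, run from the repository root; the ordering is the point here,
+while arguments and flags are omitted because they depend on your setup - check each command's help
+output for the actual options.
+
+```bash
+# scaffold a new CloudHarness REST application
+harness-application
+# (re)generate server and client code from the OpenAPI definitions
+harness-generate
+# generate the Helm chart used to deploy the cluster on Kubernetes
+harness-deployment
+```
+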
## Get started
### Prerequisites
@@ -104,7 +118,12 @@ images are pushed after the build.
Any public registry will work. The suggested way to go is to install a registry on localhost:5000 inside
the kube cluster and push on that registry, also forwarded to localhost.
-More info inside `./registry/README.md`.
+On minikube you can use the registry addon:
+
+`minikube addons enable registry`
+
+Then forward the registry to localhost with:
+`kubectl port-forward --namespace kube-system $(kubectl get po -n kube-system | grep registry | grep -v proxy | awk '{print $1;}') 5000:5000`
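+
+With the registry forwarded to `localhost:5000`, locally built images can be tagged and pushed to it,
+for example (the image name below is illustrative):
+
+```bash
+docker tag cloudharness-base:latest localhost:5000/cloudharness-base:latest
+docker push localhost:5000/cloudharness-base:latest
+```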
### Argo installation
diff --git a/applications/README.md b/applications/README.md
index 77bcd284..251e0b70 100644
--- a/applications/README.md
+++ b/applications/README.md
@@ -34,7 +34,7 @@ Dockerfile
## Define an application without openapi
1. Add the application inside `applications/[APPLICATION_NAME]` with a Docker file in it. The Docker file must inherit
-from `r.cfcr.io/tarelli/cloudharness-base` in order to get access to cloudharness libraries.
+from `cloudharness-base` in order to get access to cloudharness libraries (see the Dockerfile sketch below).
1. Define values.yaml inside the file in order to specify custom values for the application
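+
+For the Dockerfile mentioned in step 1, a minimal sketch following the same pattern used by the bundled
+applications (the entry point is illustrative and should be replaced with the application's own command):
+
+```dockerfile
+ARG REGISTRY
+ARG TAG=latest
+# inherit from cloudharness-base to get access to the cloudharness libraries
+FROM ${REGISTRY}cloudharness-base:${TAG}
+
+WORKDIR /usr/src/app
+COPY . /usr/src/app
+
+# illustrative entry point
+CMD ["python3", "main.py"]
+```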
diff --git a/applications/accounts/deploy/resources/realm.json b/applications/accounts/deploy/resources/realm.json
index 75532b5f..1ef6dc59 100644
--- a/applications/accounts/deploy/resources/realm.json
+++ b/applications/accounts/deploy/resources/realm.json
@@ -3,14 +3,14 @@
"realm": {{ .Values.namespace | quote }},
"enabled": true,
"sslRequired": "external",
- "loginTheme": "custom",
- "accountTheme": "custom",
- "adminTheme": "custom",
- "emailTheme": "custom",
+ "loginTheme": "keycloak",
+ "accountTheme": "keycloak",
+ "adminTheme": "keycloak",
+ "emailTheme": "keycloak",
"registrationAllowed": true,
- "registrationEmailAsUsername": true,
+ "registrationEmailAsUsername": false,
"rememberMe": true,
- "verifyEmail": true,
+ "verifyEmail": false,
"loginWithEmailAllowed": true,
"duplicateEmailsAllowed": false,
"resetPasswordAllowed": true,
@@ -49,7 +49,7 @@
{
"id": "9a6a2560-c6be-4493-8bd5-3fdc4522d82b",
"clientId": {{ .Values.apps.accounts.client.id | quote }},
- "baseUrl": {{ printf "https://%s" .Values.domain | quote }},
+ "baseUrl": {{ printf "%s://accounts.%s" (ternary "http" "https" (not .Values.tls)) .Values.domain | quote }},
"surrogateAuthRequired": false,
"enabled": true,
"clientAuthenticatorType": "client-secret",
@@ -84,7 +84,7 @@
{
"id": "111caf43-3d26-484d-8dc9-7fa911ac221c",
"clientId": {{ .Values.apps.accounts.webclient.id | quote }},
- "baseUrl": {{ printf "https://%s.%s" .Values.apps.events.subdomain .Values.domain | quote }},
+ "baseUrl": {{ printf "%s://accounts.%s" (ternary "http" "https" (not .Values.tls)) .Values.domain | quote }},
"surrogateAuthRequired": false,
"enabled": true,
"clientAuthenticatorType": "client-secret",
@@ -93,10 +93,12 @@
"*"
],
"webOrigins": [
- "*"
+ "*",
+ "+"
],
"standardFlowEnabled": true,
"directAccessGrantsEnabled": true,
+ "publicClient": true,
"protocol": "openid-connect",
"fullScopeAllowed": true,
"defaultClientScopes": [
@@ -623,5 +625,5 @@
}
}
],
- "keycloakVersion": "6.0.1"
+ "keycloakVersion": "9.0.2"
}
\ No newline at end of file
diff --git a/applications/accounts/keycloak-gatekeeper/Dockerfile b/applications/accounts/keycloak-gatekeeper/Dockerfile
deleted file mode 100644
index 50168e3a..00000000
--- a/applications/accounts/keycloak-gatekeeper/Dockerfile
+++ /dev/null
@@ -1,37 +0,0 @@
-FROM alpine:3.8
-
-ENV NAME keycloak-gatekeeper
-ENV KEYCLOAK_VERSION 6.0.1
-ENV GOOS linux
-ENV GOARCH amd64
-ENV DOMAIN cloudharness.local
-
-LABEL Name=keycloak-gatekeeper \
- Release=https://github.com/keycloak/keycloak-gatekeeper \
- Url=https://github.com/keycloak/keycloak-gatekeeper \
- Help=https://issues.jboss.org/projects/KEYCLOAK
-
-RUN apk add --no-cache curl tar bash
-RUN apk add --update openssl && \
- rm -rf /var/cache/apk/*
-
-RUN openssl genrsa -des3 -passout pass:x -out server.pass.key 2048 && \
- openssl rsa -passin pass:x -in server.pass.key -out server.key && \
- rm server.pass.key && \
- openssl req -new -key server.key -out server.csr \
- -subj "/C=UK/ST=Oxford/L=Leamington/O=OrgName/OU=IT Department/CN=*.${DOMAIN}" && \
- openssl x509 -req -days 365 -in server.csr -signkey server.key -out /usr/local/share/ca-certificates/cacert.crt
-RUN cat /usr/local/share/ca-certificates/cacert.crt
-WORKDIR /opt
-RUN echo "https://downloads.jboss.org/keycloak/$KEYCLOAK_VERSION/gatekeeper/$NAME-$GOOS-$GOARCH.tar.gz"
-RUN curl -fssL "https://downloads.jboss.org/keycloak/$KEYCLOAK_VERSION/gatekeeper/$NAME-$GOOS-$GOARCH.tar.gz" | tar -xz && chmod +x /opt/$NAME
-
-
-# Update the CA list for ubuntu
-RUN update-ca-certificates --verbose
-# include your CA in httplib2 (required for hand-shake between UI servers and keycloak)
-# RUN cat /usr/local/share/ca-certificates/extra/cacert.crt >> /usr/local/lib/python3.7/site-packages/certifi/cacert.pem
-
-####
-
-ENTRYPOINT [ "/opt/keycloak-gatekeeper" ]
\ No newline at end of file
diff --git a/applications/accounts/themes/custom/README.md b/applications/accounts/themes/custom/README.md
new file mode 100644
index 00000000..e44db0ff
--- /dev/null
+++ b/applications/accounts/themes/custom/README.md
@@ -0,0 +1,3 @@
+# Custom theme for Keycloak
+
+For custom theme development see: [Keycloak themes development](https://www.keycloak.org/docs/latest/server_development/index.html#_themes)
diff --git a/applications/argo/deploy/values.yaml b/applications/argo/deploy/values.yaml
index b6987160..caea801f 100644
--- a/applications/argo/deploy/values.yaml
+++ b/applications/argo/deploy/values.yaml
@@ -1,9 +1,12 @@
-enabled: true
-name: argo-server
-subdomain: argo
-port: 2746
+harness:
+ subdomain: argo
+ secured: true
+ name: argo-proxy1
+ service:
+ port: 80
+ auto: false
+ name: argo-ui
+
serviceaccount: argo-workflows
-namespace: argo
+namespace: argo.svc.cluster.local
workflows_namespace: argo-workflows
-secureme: true
-autoservice: false
\ No newline at end of file
diff --git a/applications/common/api/config.json b/applications/common/api/config.json
new file mode 100644
index 00000000..b94af9f4
--- /dev/null
+++ b/applications/common/api/config.json
@@ -0,0 +1,3 @@
+{
+ "packageName": "common"
+}
\ No newline at end of file
diff --git a/applications/common/api/openapi.yaml b/applications/common/api/openapi.yaml
new file mode 100644
index 00000000..37e7e958
--- /dev/null
+++ b/applications/common/api/openapi.yaml
@@ -0,0 +1,35 @@
+openapi: 3.0.0
+info:
+ description: Cloud Harness Platform - Reference CH service API
+ license:
+ name: UNLICENSED
+ title: CH common service API
+ version: 0.1.0
+servers:
+- description: SwaggerHub API Auto Mocking
+ url: /api
+tags:
+- description: Sentry
+ name: Sentry
+paths:
+ /sentry/getdsn/{appname}:
+ parameters:
+ - in: path
+ name: appname
+ schema:
+ type: string
+ required: true
+ get:
+ tags:
+ - Sentry
+ description: Gets the Sentry DSN for a given application
+ operationId: getdsn
+ responses:
+ '200':
+ description: Sentry DSN for the given application
+ content:
+ application/json:
+ schema:
+ type: string
+ summary: Gets the Sentry DSN for a given application
+ x-openapi-router-controller: common.controllers.sentry_controller
diff --git a/applications/common/deploy/values.yaml b/applications/common/deploy/values.yaml
new file mode 100644
index 00000000..6e68ff43
--- /dev/null
+++ b/applications/common/deploy/values.yaml
@@ -0,0 +1,19 @@
+harness:
+ subdomain: common
+ secured: false
+ service:
+ auto: true
+ port: 8080
+ name: common
+ deployment:
+ auto: true
+ name: common
+ port: 8080
+ resources:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ cpu: 200m
+
\ No newline at end of file
diff --git a/applications/samples/src/.dockerignore b/applications/common/server/.dockerignore
similarity index 100%
rename from applications/samples/src/.dockerignore
rename to applications/common/server/.dockerignore
diff --git a/applications/samples/src/.gitignore b/applications/common/server/.gitignore
similarity index 100%
rename from applications/samples/src/.gitignore
rename to applications/common/server/.gitignore
diff --git a/applications/workflows/src/.openapi-generator-ignore b/applications/common/server/.openapi-generator-ignore
similarity index 96%
rename from applications/workflows/src/.openapi-generator-ignore
rename to applications/common/server/.openapi-generator-ignore
index b09fd633..accd8f27 100644
--- a/applications/workflows/src/.openapi-generator-ignore
+++ b/applications/common/server/.openapi-generator-ignore
@@ -22,6 +22,4 @@
# Then explicitly reverse the ignore rule for a single file:
#!docs/README.md
-Dockerfile
-*/controllers/*
-*/models/*
\ No newline at end of file
+*/controllers/*
\ No newline at end of file
diff --git a/applications/samples/src/.travis.yml b/applications/common/server/.travis.yml
similarity index 100%
rename from applications/samples/src/.travis.yml
rename to applications/common/server/.travis.yml
diff --git a/applications/workflows/src/Dockerfile b/applications/common/server/Dockerfile
similarity index 53%
rename from applications/workflows/src/Dockerfile
rename to applications/common/server/Dockerfile
index 862083cd..de16ed1b 100644
--- a/applications/workflows/src/Dockerfile
+++ b/applications/common/server/Dockerfile
@@ -1,7 +1,10 @@
-ARG REGISTRY=r.cfcr.io/tarelli/
+ARG REGISTRY
ARG TAG=latest
FROM ${REGISTRY}cloudharness-base:${TAG}
+RUN apk update && apk add postgresql-dev gcc python3-dev musl-dev
+#RUN apk add psycopg2 libpq-dev python-dev
+
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app
@@ -13,6 +16,9 @@ COPY . /usr/src/app
EXPOSE 8080
+ENV FLASK_ENV=production
+ENV APP_SETTINGS=common.config.ProductionConfig
+
ENTRYPOINT ["python3"]
-CMD ["-m", "workflows_api"]
\ No newline at end of file
+CMD ["-m", "common"]
diff --git a/applications/workflows/src/README.md b/applications/common/server/README.md
similarity index 84%
rename from applications/workflows/src/README.md
rename to applications/common/server/README.md
index 0f31414a..86f05fa1 100644
--- a/applications/workflows/src/README.md
+++ b/applications/common/server/README.md
@@ -15,19 +15,19 @@ To run the server, please execute the following from the root directory:
```
pip3 install -r requirements.txt
-python3 -m workflows_api
+python3 -m openapi_server
```
and open your browser to here:
```
-http://localhost:8080/ui/
+http://localhost:8080/api/ui/
```
Your OpenAPI definition lives here:
```
-http://localhost:8080/openapi.json
+http://localhost:8080/api/openapi.json
```
To launch the integration tests, use tox:
@@ -42,8 +42,8 @@ To run the server on a Docker container, please execute the following from the r
```bash
# building the image
-docker build -t workflows_api .
+docker build -t openapi_server .
# starting up a container
-docker run -p 8080:8080 workflows_api
+docker run -p 8080:8080 openapi_server
```
\ No newline at end of file
diff --git a/applications/samples/src/api_samples/__init__.py b/applications/common/server/common/__init__.py
similarity index 100%
rename from applications/samples/src/api_samples/__init__.py
rename to applications/common/server/common/__init__.py
diff --git a/applications/common/server/common/__main__.py b/applications/common/server/common/__main__.py
new file mode 100644
index 00000000..9d811a33
--- /dev/null
+++ b/applications/common/server/common/__main__.py
@@ -0,0 +1,23 @@
+import os
+
+from flask import Flask
+from flask_cors import CORS
+
+import connexion
+
+from common import encoder
+
+def main():
+ app = connexion.App(__name__, specification_dir='./openapi/')
+ app.app.config.from_object(os.environ['APP_SETTINGS'])
+ app.app.json_encoder = encoder.JSONEncoder
+ app.add_api('openapi.yaml',
+ arguments={'title': 'CH service API'},
+ pythonic_params=True)
+ from .repository.db import open_db
+ open_db(app)
+ cors = CORS(app.app, resources={r"/api/*": {"origins": "*"}})
+ app.run(port=8080)
+
+if __name__ == '__main__':
+ main()
diff --git a/applications/common/server/common/config.py b/applications/common/server/common/config.py
new file mode 100644
index 00000000..28dde6b1
--- /dev/null
+++ b/applications/common/server/common/config.py
@@ -0,0 +1,41 @@
+import os
+import logging
+
+from cloudharness.utils.config import CloudharnessConfig as conf
+
+basedir = os.path.abspath(os.path.dirname(__file__))
+
+
+class Config(object):
+ DEBUG = False
+ TESTING = False
+ CSRF_ENABLED = True
+ SQLALCHEMY_TRACK_MODIFICATIONS = False
+ SECRET_KEY = 'this-really-needs-to-be-changed'
+ SENTRY_POSTGRES_APP = None
+ SENTRY_APP = None
+ try:
+ SENTRY_POSTGRES_APP = conf.get_application_by_filter(name='sentry')[0].postgres
+ SENTRY_APP = conf.get_application_by_filter(name='sentry')[0].name
+ SQLALCHEMY_DATABASE_URI = f'postgresql+psycopg2://{SENTRY_POSTGRES_APP.user}:{SENTRY_POSTGRES_APP.password}@{SENTRY_POSTGRES_APP.name}:{SENTRY_POSTGRES_APP.port}/{SENTRY_POSTGRES_APP.initialdb}'
+ except:
+ logging.error("Cannot configure SENTRY")
+
+
+
+class ProductionConfig(Config):
+ DEBUG = False
+
+
+class StagingConfig(Config):
+ DEVELOPMENT = True
+ DEBUG = True
+
+
+class DevelopmentConfig(Config):
+ DEVELOPMENT = True
+ DEBUG = True
+
+
+class TestingConfig(Config):
+ TESTING = True
diff --git a/applications/samples/src/api_samples/controllers/__init__.py b/applications/common/server/common/controllers/__init__.py
similarity index 100%
rename from applications/samples/src/api_samples/controllers/__init__.py
rename to applications/common/server/common/controllers/__init__.py
diff --git a/applications/workflows/server/workflows_api/controllers/security_controller_.py b/applications/common/server/common/controllers/security_controller_.py
similarity index 100%
rename from applications/workflows/server/workflows_api/controllers/security_controller_.py
rename to applications/common/server/common/controllers/security_controller_.py
diff --git a/applications/common/server/common/controllers/sentry_controller.py b/applications/common/server/common/controllers/sentry_controller.py
new file mode 100644
index 00000000..8ffd6cdc
--- /dev/null
+++ b/applications/common/server/common/controllers/sentry_controller.py
@@ -0,0 +1,37 @@
+import connexion
+import six
+import requests
+import urllib
+
+from common import util
+
+from common.repository.sentry import get_token, get_dsn, SentryProjectNotFound
+
+from cloudharness.utils.env import get_sentry_service_cluster_address
+from cloudharness import applications
+
+
+def getdsn(appname): # noqa: E501
+ """
+ Gets the Sentry DSN for a given application
+
+ :param appname:
+ :type appname: str
+
+ :rtype: str
+ """
+ ch_app = applications.get_configuration(appname)
+ if ch_app.is_sentry_enabled():
+ try:
+ dsn = get_dsn(appname)
+ except SentryProjectNotFound as e:
+ # if project not found, create one
+ sentry_api_token = get_token()
+ headers = {'Authorization': 'Bearer ' + sentry_api_token}
+ url = get_sentry_service_cluster_address() + f'/api/0/teams/sentry/sentry/projects/'
+ data = {'name' : appname}
+ response = requests.post(url, data, headers=headers, verify=False)
+ dsn = get_dsn(appname)
+ else:
+ dsn = ''
+ return {'dsn': dsn}
diff --git a/applications/samples/src/api_samples/encoder.py b/applications/common/server/common/encoder.py
similarity index 91%
rename from applications/samples/src/api_samples/encoder.py
rename to applications/common/server/common/encoder.py
index 7a200e51..a88fe1fc 100644
--- a/applications/samples/src/api_samples/encoder.py
+++ b/applications/common/server/common/encoder.py
@@ -1,7 +1,7 @@
from connexion.apps.flask_app import FlaskJSONEncoder
import six
-from api_samples.models.base_model_ import Model
+from common.models.base_model_ import Model
class JSONEncoder(FlaskJSONEncoder):
diff --git a/applications/common/server/common/models.py b/applications/common/server/common/models.py
new file mode 100644
index 00000000..8817fa65
--- /dev/null
+++ b/applications/common/server/common/models.py
@@ -0,0 +1,19 @@
+from __main__ import db
+from sqlalchemy.dialects.postgresql import JSON
+
+
+class Result(db.Model):
+ __tablename__ = 'results'
+
+ id = db.Column(db.Integer, primary_key=True)
+ url = db.Column(db.String())
+ result_all = db.Column(JSON)
+ result_no_stop_words = db.Column(JSON)
+
+ def __init__(self, url, result_all, result_no_stop_words):
+ self.url = url
+ self.result_all = result_all
+ self.result_no_stop_words = result_no_stop_words
+
+ def __repr__(self):
+        return '<id {}>'.format(self.id)
\ No newline at end of file
diff --git a/applications/common/server/common/models/__init__.py b/applications/common/server/common/models/__init__.py
new file mode 100644
index 00000000..2221d931
--- /dev/null
+++ b/applications/common/server/common/models/__init__.py
@@ -0,0 +1,5 @@
+# coding: utf-8
+
+# flake8: noqa
+from __future__ import absolute_import
+# import models into model package
diff --git a/applications/samples/src/api_samples/models/base_model_.py b/applications/common/server/common/models/base_model_.py
similarity index 98%
rename from applications/samples/src/api_samples/models/base_model_.py
rename to applications/common/server/common/models/base_model_.py
index 3ace029a..f0fd4a22 100644
--- a/applications/samples/src/api_samples/models/base_model_.py
+++ b/applications/common/server/common/models/base_model_.py
@@ -3,7 +3,7 @@
import six
import typing
-from api_samples import util
+from common import util
T = typing.TypeVar('T')
diff --git a/applications/common/server/common/openapi/openapi.yaml b/applications/common/server/common/openapi/openapi.yaml
new file mode 100644
index 00000000..992e266e
--- /dev/null
+++ b/applications/common/server/common/openapi/openapi.yaml
@@ -0,0 +1,39 @@
+openapi: 3.0.0
+info:
+ description: Cloud Harness Platform - Reference CH service API
+ license:
+ name: UNLICENSED
+ title: CH common service API
+ version: 0.1.0
+servers:
+- description: SwaggerHub API Auto Mocking
+ url: /api
+tags:
+- description: Sentry
+ name: Sentry
+paths:
+ /sentry/getdsn/{appname}:
+ get:
+ description: Gets the Sentry DSN for a given application
+ operationId: getdsn
+ parameters:
+ - explode: false
+ in: path
+ name: appname
+ required: true
+ schema:
+ type: string
+ style: simple
+ responses:
+ "200":
+ content:
+ application/json:
+ schema:
+ type: string
+ description: Sentry DSN for the given application
+ summary: Gets the Sentry DSN for a given application
+ tags:
+ - Sentry
+ x-openapi-router-controller: common.controllers.sentry_controller
+components:
+ schemas: {}
diff --git a/applications/samples/src/api_samples/service/__init__.py b/applications/common/server/common/repository/__init__.py
similarity index 100%
rename from applications/samples/src/api_samples/service/__init__.py
rename to applications/common/server/common/repository/__init__.py
diff --git a/applications/common/server/common/repository/db.py b/applications/common/server/common/repository/db.py
new file mode 100644
index 00000000..a76cb8bc
--- /dev/null
+++ b/applications/common/server/common/repository/db.py
@@ -0,0 +1,15 @@
+from flask_sqlalchemy import SQLAlchemy
+
+db = None
+
+def get_db():
+ global db
+ if not db:
+ raise Exception('Database not open!')
+ return db
+
+def open_db(app):
+ global db
+ if not db:
+ db = SQLAlchemy(app.app)
+ return db
diff --git a/applications/common/server/common/repository/sentry.py b/applications/common/server/common/repository/sentry.py
new file mode 100644
index 00000000..3b9fbc60
--- /dev/null
+++ b/applications/common/server/common/repository/sentry.py
@@ -0,0 +1,61 @@
+from sqlalchemy.sql import text
+
+from cloudharness.utils.env import get_service_public_address
+
+from .db import get_db
+
+class SentryProjectNotFound(Exception):
+ pass
+
+def _get_api_token():
+    # TODO: maybe we can use a dynamic token here, but for now let's use a hard-coded one
+ api_token = 'afe75d802007405dbc0c2fb1db4cc8b06b981017f58944d0afac700f743ee06a'
+ s = text('''
+ select token from sentry_apitoken
+ where token=:api_token
+ ''')
+ token = get_db().engine.execute(s,
+ api_token=api_token
+ ).fetchall()
+ if len(token) == 0:
+ # token is not present in the Sentry database, let's create it
+ s = text('''
+ insert into sentry_apitoken(user_id, token, scopes, date_added, scope_list)
+ values (1, :api_token, 0, now(), :scope_list)
+ ''')
+ get_db().engine.execute(s,
+ api_token=api_token,
+ scope_list='{event:admin,event:read,'
+ 'member:read,member:admin,'
+ 'project:read,project:releases,project:admin,project:write,'
+ 'team:read,team:write,team:admin,'
+ 'org:read,org:write,org:admin}'
+ )
+ return _get_api_token()
+ else:
+ # return the first column from the first row of the query result
+ return token[0][0]
+
+def get_token():
+ return _get_api_token()
+
+def get_dsn(appname):
+ s = text('''
+ select public_key, p.id
+ from sentry_projectkey pkey
+ join sentry_project p on pkey.project_id=p.id
+ where p.slug=:project_slug
+ ''')
+ public_key = get_db().engine.execute(s,
+ project_slug=appname
+ ).fetchall()
+ if len(public_key) == 0:
+ raise SentryProjectNotFound('Application not found!')
+ else:
+ dsn = public_key[0][0]
+ app_id = public_key[0][1]
+ sentry_host = get_service_public_address('sentry')
+ dsn = f'https://{dsn}@{sentry_host}/{app_id}'
+
+ # return the first column from the first row of the query result
+ return dsn
diff --git a/applications/samples/src/api_samples/test/__init__.py b/applications/common/server/common/test/__init__.py
similarity index 89%
rename from applications/samples/src/api_samples/test/__init__.py
rename to applications/common/server/common/test/__init__.py
index a2dbaf9d..827a6d59 100644
--- a/applications/samples/src/api_samples/test/__init__.py
+++ b/applications/common/server/common/test/__init__.py
@@ -3,7 +3,7 @@
import connexion
from flask_testing import TestCase
-from api_samples.encoder import JSONEncoder
+from common.encoder import JSONEncoder
class BaseTestCase(TestCase):
diff --git a/applications/common/server/common/test/test_sentry_controller.py b/applications/common/server/common/test/test_sentry_controller.py
new file mode 100644
index 00000000..a01d90c9
--- /dev/null
+++ b/applications/common/server/common/test/test_sentry_controller.py
@@ -0,0 +1,32 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+import unittest
+
+from flask import json
+from six import BytesIO
+
+from common.test import BaseTestCase
+
+
+class TestSentryController(BaseTestCase):
+ """SentryController integration test stubs"""
+
+ def test_getdsn(self):
+ """Test case for getdsn
+
+ Gets the Sentry DSN for a given application
+ """
+ headers = {
+ 'Accept': 'application/json',
+ }
+ response = self.client.open(
+ '/api/sentry/getdsn/{appname}'.format(appname='appname_example'),
+ method='GET',
+ headers=headers)
+ self.assert200(response,
+ 'Response body is : ' + response.data.decode('utf-8'))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/applications/samples/src/api_samples/typing_utils.py b/applications/common/server/common/typing_utils.py
similarity index 100%
rename from applications/samples/src/api_samples/typing_utils.py
rename to applications/common/server/common/typing_utils.py
diff --git a/applications/samples/src/api_samples/util.py b/applications/common/server/common/util.py
similarity index 98%
rename from applications/samples/src/api_samples/util.py
rename to applications/common/server/common/util.py
index 6e8baa5f..86cd1196 100644
--- a/applications/samples/src/api_samples/util.py
+++ b/applications/common/server/common/util.py
@@ -2,7 +2,7 @@
import six
import typing
-from api_samples import typing_utils
+from common import typing_utils
def _deserialize(data, klass):
diff --git a/applications/samples/src/git_push.sh b/applications/common/server/git_push.sh
similarity index 100%
rename from applications/samples/src/git_push.sh
rename to applications/common/server/git_push.sh
diff --git a/applications/common/server/requirements.txt b/applications/common/server/requirements.txt
new file mode 100644
index 00000000..af78e2e1
--- /dev/null
+++ b/applications/common/server/requirements.txt
@@ -0,0 +1,16 @@
+connexion[swagger-ui] >= 2.6.0; python_version>="3.6"
+# 2.3 is the last version that supports python 3.4-3.5
+connexion[swagger-ui] <= 2.3.0; python_version=="3.5" or python_version=="3.4"
+# connexion requires werkzeug but connexion < 2.4.0 does not install werkzeug
+# we must peg werkzeug versions below to fix connexion
+# https://github.com/zalando/connexion/pull/1044
+werkzeug == 0.16.1; python_version=="3.5" or python_version=="3.4"
+swagger-ui-bundle >= 0.0.2
+python_dateutil >= 2.6.0
+setuptools >= 21.0.0
+
+psycopg2-binary==2.8.5
+Flask-SQLAlchemy==2.4.3
+SQLAlchemy==1.3.17
+requests==2.21.0
+flask-cors
diff --git a/applications/samples/src/setup.py b/applications/common/server/setup.py
similarity index 62%
rename from applications/samples/src/setup.py
rename to applications/common/server/setup.py
index 3e72420c..147f4f42 100644
--- a/applications/samples/src/setup.py
+++ b/applications/common/server/setup.py
@@ -3,7 +3,7 @@
import sys
from setuptools import setup, find_packages
-NAME = "api_samples"
+NAME = "openapi_server"
VERSION = "1.0.0"
# To install the library, run the following
@@ -16,26 +16,24 @@
REQUIRES = [
"connexion>=2.0.2",
"swagger-ui-bundle>=0.0.2",
- "python_dateutil>=2.6.0",
- "pyjwt>=1.7.1",
- "cloudharness"
+ "python_dateutil>=2.6.0"
]
setup(
name=NAME,
version=VERSION,
- description="CloudHarness Sample API",
- author_email="cloudharness@metacell.us",
+ description="CH service API",
+ author_email="",
url="",
- keywords=["OpenAPI", "CloudHarness Sample API"],
+ keywords=["OpenAPI", "CH service API"],
install_requires=REQUIRES,
packages=find_packages(),
package_data={'': ['openapi/openapi.yaml']},
include_package_data=True,
entry_points={
- 'console_scripts': ['api_samples=api_samples.__main__:main']},
+ 'console_scripts': ['openapi_server=openapi_server.__main__:main']},
long_description="""\
- CloudHarness Sample api
+ Cloud Harness Platform - Reference CH service API
"""
)
diff --git a/applications/workflows/src/test-requirements.txt b/applications/common/server/test-requirements.txt
similarity index 51%
rename from applications/workflows/src/test-requirements.txt
rename to applications/common/server/test-requirements.txt
index a2626d87..f8f951d7 100644
--- a/applications/workflows/src/test-requirements.txt
+++ b/applications/common/server/test-requirements.txt
@@ -1,4 +1,8 @@
pytest~=4.6.7 # needed for python 2.7+3.4
pytest-cov>=2.8.1
pytest-randomly==1.2.3 # needed for python 2.7+3.4
-flask_testing==0.6.1
\ No newline at end of file
+Flask-Testing==0.8.0
+psycopg2-binary==2.8.5
+Flask-SQLAlchemy==2.4.3
+SQLAlchemy==1.3.17
+requests==2.21.0
diff --git a/applications/samples/src/tox.ini b/applications/common/server/tox.ini
similarity index 66%
rename from applications/samples/src/tox.ini
rename to applications/common/server/tox.ini
index 8f380ee0..f66b2d84 100644
--- a/applications/samples/src/tox.ini
+++ b/applications/common/server/tox.ini
@@ -1,9 +1,11 @@
[tox]
envlist = py3
+skipsdist=True
[testenv]
deps=-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
+ {toxinidir}
commands=
- pytest --cov=api_samples
\ No newline at end of file
+ pytest --cov=openapi_server
diff --git a/applications/events/deploy/resources/broker/init.sh b/applications/events/deploy/resources/broker/init.sh
index 99581a71..5caef07e 100644
--- a/applications/events/deploy/resources/broker/init.sh
+++ b/applications/events/deploy/resources/broker/init.sh
@@ -34,3 +34,14 @@ hash kubectl 2>/dev/null || {
printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp
[ $? -eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties
+# wait for events to be up & running
+# install curl first
+apt update && apt install -y curl
+for i in {1..100}
+do
+ sleep 5
+ if curl events; then
+ curl events:80/clusters --data 'name=cluster-'${POD_NAMESPACE}'&zkHosts=zookeeper.'${POD_NAMESPACE}':2181&kafkaVersion=2.2.0&jmxEnabled=true&jmxUser=&jmxPass=&logkafkaEnabled=true&pollConsumers=true&activeOffsetCacheEnabled=true&tuning.brokerViewUpdatePeriodSeconds=30&tuning.clusterManagerThreadPoolSize=2&tuning.clusterManagerThreadPoolQueueSize=100&tuning.kafkaCommandThreadPoolSize=2&tuning.kafkaCommandThreadPoolQueueSize=100&tuning.logkafkaCommandThreadPoolSize=2&tuning.logkafkaCommandThreadPoolQueueSize=100&tuning.logkafkaUpdatePeriodSeconds=30&tuning.partitionOffsetCacheTimeoutSecs=5&tuning.brokerViewThreadPoolSize=2&tuning.brokerViewThreadPoolQueueSize=1000&tuning.offsetCacheThreadPoolSize=2&tuning.offsetCacheThreadPoolQueueSize=1000&tuning.kafkaAdminClientThreadPoolSize=2&tuning.kafkaAdminClientThreadPoolQueueSize=1000&tuning.kafkaManagedOffsetMetadataCheckMillis=30000&tuning.kafkaManagedOffsetGroupCacheSize=1000000&tuning.kafkaManagedOffsetGroupExpireDays=7&securityProtocol=PLAINTEXT&saslMechanism=DEFAULT&jaasConfig=' -X POST
+ exit 0
+ fi
+done
diff --git a/applications/events/deploy/templates/roles.yml b/applications/events/deploy/templates/roles.yml
index 47018c32..370d3143 100644
--- a/applications/events/deploy/templates/roles.yml
+++ b/applications/events/deploy/templates/roles.yml
@@ -22,9 +22,10 @@ rules:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
+kind: Role
metadata:
name: node-reader
+ namespace: {{ .Release.Namespace }}
labels:
origin: github.com_Yolean_kubernetes-kafka
rules:
@@ -41,6 +42,7 @@ metadata:
labels:
origin: github.com_Yolean_kubernetes-kafka
name: kafka-pod-labler
+ namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -48,19 +50,18 @@ roleRef:
subjects:
- kind: ServiceAccount
name: kafka
- namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
+kind: RoleBinding
metadata:
labels:
origin: github.com_Yolean_kubernetes-kafka
name: kafka-node-reader
+ namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
+ kind: Role
name: node-reader
subjects:
- kind: ServiceAccount
name: kafka
- namespace: {{ .Release.Namespace }}
diff --git a/applications/events/deploy/values.yaml b/applications/events/deploy/values.yaml
index c23221d2..37f1683d 100644
--- a/applications/events/deploy/values.yaml
+++ b/applications/events/deploy/values.yaml
@@ -1,9 +1,9 @@
-name: events
-subdomain: events
-autoservice: true
-autodeploy: false
-secureme: true
-port: 80
+harness:
+ subdomain: events
+ secured: true
+ service:
+ port: 80
+ auto: true
kafka:
- name: bootstrap
- port: 9092
\ No newline at end of file
+ name: bootstrap
+ port: 9092
\ No newline at end of file
diff --git a/applications/jupyterhub/.gitignore b/applications/jupyterhub/.gitignore
new file mode 100755
index 00000000..9c7cae91
--- /dev/null
+++ b/applications/jupyterhub/.gitignore
@@ -0,0 +1,2 @@
+src/kubespawner
+src/jupyterhub
\ No newline at end of file
diff --git a/applications/jupyterhub/Dockerfile b/applications/jupyterhub/Dockerfile
new file mode 100755
index 00000000..e5304a21
--- /dev/null
+++ b/applications/jupyterhub/Dockerfile
@@ -0,0 +1,14 @@
+FROM jupyterhub/k8s-hub:0.9.0
+USER root
+
+# COPY deploy/resources/hub/* /etc/jupyterhub/
+# COPY src/jupyterhub/jupyterhub/handlers/* /usr/local/lib/python3.6/dist-packages/jupyterhub/handlers/
+# COPY ./src/kubespawner /usr/local/lib/python3.6/dist-packages/
+COPY src src
+RUN pip install ./src/harness_jupyter
+RUN pip install ./src/chauthenticator
+RUN chmod 777 /usr/local/lib/python3.6/dist-packages/ -R
+USER jovyan
+
+
+# CMD ["jupyterhub", "--config", "/srv/jupyterhub_config.py"]
\ No newline at end of file
diff --git a/applications/jupyterhub/README.md b/applications/jupyterhub/README.md
new file mode 100755
index 00000000..de49fd20
--- /dev/null
+++ b/applications/jupyterhub/README.md
@@ -0,0 +1,8 @@
+# Custom JupyterHub
+
+This Helm chart is forked from the main JupyterHub Helm chart and adapted to the CloudHarness path structure.
+
+The main customizations happen in the file `jupyterhub_config.py`, which implements dynamic behaviours
+such as the following (an illustrative sketch follows the list):
+ - Use a different image based on the current path/parameter
+ - Mount custom volumes
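+
+As an illustrative sketch only (not the actual CloudHarness hook implementation), this kind of behaviour
+can be obtained in `jupyterhub_config.py` with a JupyterHub `pre_spawn_hook` that adjusts the KubeSpawner
+before the user pod is created; the option key, image name and claim name below are assumptions:
+
+```python
+def pre_spawn_hook(spawner):
+    # pick a different single-user image based on a spawn option/parameter
+    if spawner.user_options.get("image") == "datascience":
+        spawner.image = "jupyter/datascience-notebook:latest"
+    # mount a custom volume into the user pod
+    spawner.volumes.append({
+        "name": "shared-data",
+        "persistentVolumeClaim": {"claimName": "shared-data"},
+    })
+    spawner.volume_mounts.append({
+        "name": "shared-data",
+        "mountPath": "/home/jovyan/shared",
+    })
+
+c.Spawner.pre_spawn_hook = pre_spawn_hook
+```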
\ No newline at end of file
diff --git a/applications/jupyterhub/deploy/resources/hub/cull_idle_servers.py b/applications/jupyterhub/deploy/resources/hub/cull_idle_servers.py
new file mode 100755
index 00000000..f5c32cdf
--- /dev/null
+++ b/applications/jupyterhub/deploy/resources/hub/cull_idle_servers.py
@@ -0,0 +1,372 @@
+#!/usr/bin/env python3
+# Imported from https://github.com/jupyterhub/jupyterhub/blob/6b1046697/examples/cull-idle/cull_idle_servers.py
+"""script to monitor and cull idle single-user servers
+
+Caveats:
+
+last_activity is not updated with high frequency,
+so cull timeout should be greater than the sum of:
+
+- single-user websocket ping interval (default: 30s)
+- JupyterHub.last_activity_interval (default: 5 minutes)
+
+You can run this as a service managed by JupyterHub with this in your config::
+
+
+ c.JupyterHub.services = [
+ {
+ 'name': 'cull-idle',
+ 'admin': True,
+ 'command': 'python cull_idle_servers.py --timeout=3600'.split(),
+ }
+ ]
+
+Or run it manually by generating an API token and storing it in `JUPYTERHUB_API_TOKEN`:
+
+ export JUPYTERHUB_API_TOKEN=`jupyterhub token`
+ python cull_idle_servers.py [--timeout=900] [--url=http://127.0.0.1:8081/hub/api]
+"""
+
+from datetime import datetime, timezone
+from functools import partial
+import json
+import os
+
+try:
+ from urllib.parse import quote
+except ImportError:
+ from urllib import quote
+
+import dateutil.parser
+
+from tornado.gen import coroutine, multi
+from tornado.locks import Semaphore
+from tornado.log import app_log
+from tornado.httpclient import AsyncHTTPClient, HTTPRequest
+from tornado.ioloop import IOLoop, PeriodicCallback
+from tornado.options import define, options, parse_command_line
+
+
+def parse_date(date_string):
+ """Parse a timestamp
+
+ If it doesn't have a timezone, assume utc
+
+ Returned datetime object will always be timezone-aware
+ """
+ dt = dateutil.parser.parse(date_string)
+ if not dt.tzinfo:
+ # assume naïve timestamps are UTC
+ dt = dt.replace(tzinfo=timezone.utc)
+ return dt
+
+
+def format_td(td):
+ """
+ Nicely format a timedelta object
+
+ as HH:MM:SS
+ """
+ if td is None:
+ return "unknown"
+ if isinstance(td, str):
+ return td
+ seconds = int(td.total_seconds())
+ h = seconds // 3600
+ seconds = seconds % 3600
+ m = seconds // 60
+ seconds = seconds % 60
+ return f"{h:02}:{m:02}:{seconds:02}"
+
+
+@coroutine
+def cull_idle(
+ url,
+ api_token,
+ inactive_limit,
+ cull_users=False,
+ remove_named_servers=False,
+ max_age=0,
+ concurrency=10,
+):
+ """Shutdown idle single-user servers
+
+ If cull_users, inactive *users* will be deleted as well.
+ """
+ auth_header = {
+ 'Authorization': 'token %s' % api_token,
+ }
+ req = HTTPRequest(
+ url=url + '/users',
+ headers=auth_header,
+ )
+ now = datetime.now(timezone.utc)
+ client = AsyncHTTPClient()
+
+ if concurrency:
+ semaphore = Semaphore(concurrency)
+ @coroutine
+ def fetch(req):
+ """client.fetch wrapped in a semaphore to limit concurrency"""
+ yield semaphore.acquire()
+ try:
+ return (yield client.fetch(req))
+ finally:
+ yield semaphore.release()
+ else:
+ fetch = client.fetch
+
+ resp = yield fetch(req)
+ users = json.loads(resp.body.decode('utf8', 'replace'))
+ futures = []
+
+ @coroutine
+ def handle_server(user, server_name, server):
+ """Handle (maybe) culling a single server
+
+ Returns True if server is now stopped (user removable),
+ False otherwise.
+ """
+ log_name = user['name']
+ if server_name:
+ log_name = '%s/%s' % (user['name'], server_name)
+ if server.get('pending'):
+ app_log.warning(
+ "Not culling server %s with pending %s",
+ log_name, server['pending'])
+ return False
+
+ if server.get('started'):
+ age = now - parse_date(server['started'])
+ else:
+ # started may be undefined on jupyterhub < 0.9
+ age = None
+
+ # check last activity
+ # last_activity can be None in 0.9
+ if server['last_activity']:
+ inactive = now - parse_date(server['last_activity'])
+ else:
+ # no activity yet, use start date
+ # last_activity may be None with jupyterhub 0.9,
+ # which introduces the 'started' field which is never None
+ # for running servers
+ inactive = age
+
+ should_cull = (inactive is not None and
+ inactive.total_seconds() >= inactive_limit)
+ if should_cull:
+ app_log.info(
+ "Culling server %s (inactive for %s)",
+ log_name, format_td(inactive))
+
+ if max_age and not should_cull:
+ # only check started if max_age is specified
+ # so that we can still be compatible with jupyterhub 0.8
+ # which doesn't define the 'started' field
+ if age is not None and age.total_seconds() >= max_age:
+ app_log.info(
+ "Culling server %s (age: %s, inactive for %s)",
+ log_name, format_td(age), format_td(inactive))
+ should_cull = True
+
+ if not should_cull:
+ app_log.debug(
+ "Not culling server %s (age: %s, inactive for %s)",
+ log_name, format_td(age), format_td(inactive))
+ return False
+
+ body = None
+ if server_name:
+ # culling a named server
+ # A named server can be stopped and kept available to the user
+ # for starting again or stopped and removed. To remove the named
+ # server we have to pass an additional option in the body of our
+ # DELETE request.
+ delete_url = url + "/users/%s/servers/%s" % (
+ quote(user['name']),
+ quote(server['name']),
+ )
+ if remove_named_servers:
+ body = json.dumps({"remove": True})
+ else:
+ delete_url = url + '/users/%s/server' % quote(user['name'])
+
+ req = HTTPRequest(
+ url=delete_url,
+ method='DELETE',
+ headers=auth_header,
+ body=body,
+ allow_nonstandard_methods=True,
+ )
+ resp = yield fetch(req)
+ if resp.code == 202:
+ app_log.warning(
+ "Server %s is slow to stop",
+ log_name,
+ )
+ # return False to prevent culling user with pending shutdowns
+ return False
+ return True
+
+ @coroutine
+ def handle_user(user):
+ """Handle one user.
+
+ Create a list of their servers, and async exec them. Wait for
+ that to be done, and if all servers are stopped, possibly cull
+ the user.
+ """
+ # shutdown servers first.
+ # Hub doesn't allow deleting users with running servers.
+ # named servers contain the 'servers' dict
+ if 'servers' in user:
+ servers = user['servers']
+ # Otherwise, server data is intermingled in with the user
+ # model
+ else:
+ servers = {}
+ if user['server']:
+ servers[''] = {
+ 'started': user.get('started'),
+ 'last_activity': user['last_activity'],
+ 'pending': user['pending'],
+ 'url': user['server'],
+ }
+ server_futures = [
+ handle_server(user, server_name, server)
+ for server_name, server in servers.items()
+ ]
+ results = yield multi(server_futures)
+ if not cull_users:
+ return
+ # some servers are still running, cannot cull users
+ still_alive = len(results) - sum(results)
+ if still_alive:
+ app_log.debug(
+ "Not culling user %s with %i servers still alive",
+ user['name'], still_alive)
+ return False
+
+ should_cull = False
+ if user.get('created'):
+ age = now - parse_date(user['created'])
+ else:
+ # created may be undefined on jupyterhub < 0.9
+ age = None
+
+ # check last activity
+ # last_activity can be None in 0.9
+ if user['last_activity']:
+ inactive = now - parse_date(user['last_activity'])
+ else:
+ # no activity yet, use start date
+ # last_activity may be None with jupyterhub 0.9,
+ # which introduces the 'created' field which is never None
+ inactive = age
+
+ should_cull = (inactive is not None and
+ inactive.total_seconds() >= inactive_limit)
+ if should_cull:
+ app_log.info(
+ "Culling user %s (inactive for %s)",
+ user['name'], inactive)
+
+ if max_age and not should_cull:
+ # only check created if max_age is specified
+ # so that we can still be compatible with jupyterhub 0.8
+ # which doesn't define the 'started' field
+ if age is not None and age.total_seconds() >= max_age:
+ app_log.info(
+ "Culling user %s (age: %s, inactive for %s)",
+ user['name'], format_td(age), format_td(inactive))
+ should_cull = True
+
+ if not should_cull:
+ app_log.debug(
+ "Not culling user %s (created: %s, last active: %s)",
+ user['name'], format_td(age), format_td(inactive))
+ return False
+
+ req = HTTPRequest(
+ url=url + '/users/%s' % user['name'],
+ method='DELETE',
+ headers=auth_header,
+ )
+ yield fetch(req)
+ return True
+
+ for user in users:
+ futures.append((user['name'], handle_user(user)))
+
+ for (name, f) in futures:
+ try:
+ result = yield f
+ except Exception:
+ app_log.exception("Error processing %s", name)
+ else:
+ if result:
+ app_log.debug("Finished culling %s", name)
+
+
+if __name__ == '__main__':
+ define(
+ 'url',
+ default=os.environ.get('JUPYTERHUB_API_URL'),
+ help="The JupyterHub API URL",
+ )
+ define('timeout', default=600, help="The idle timeout (in seconds)")
+ define('cull_every', default=0,
+ help="The interval (in seconds) for checking for idle servers to cull")
+ define('max_age', default=0,
+ help="The maximum age (in seconds) of servers that should be culled even if they are active")
+ define('cull_users', default=False,
+ help="""Cull users in addition to servers.
+ This is for use in temporary-user cases such as BinderHub.""",
+ )
+ define('remove_named_servers', default=False,
+ help="""Remove named servers in addition to stopping them.
+ This is useful for a BinderHub that uses authentication and named servers.""",
+ )
+ define('concurrency', default=10,
+ help="""Limit the number of concurrent requests made to the Hub.
+
+ Deleting a lot of users at the same time can slow down the Hub,
+ so limit the number of API requests we have outstanding at any given time.
+ """
+ )
+
+ parse_command_line()
+ if not options.cull_every:
+ options.cull_every = options.timeout // 2
+ api_token = os.environ['JUPYTERHUB_API_TOKEN']
+
+ try:
+ AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
+ except ImportError as e:
+ app_log.warning(
+ "Could not load pycurl: %s\n"
+ "pycurl is recommended if you have a large number of users.",
+ e)
+
+ loop = IOLoop.current()
+ cull = partial(
+ cull_idle,
+ url=options.url,
+ api_token=api_token,
+ inactive_limit=options.timeout,
+ cull_users=options.cull_users,
+ remove_named_servers=options.remove_named_servers,
+ max_age=options.max_age,
+ concurrency=options.concurrency,
+ )
+ # schedule first cull immediately
+ # because PeriodicCallback doesn't start until the end of the first interval
+ loop.add_callback(cull)
+ # schedule periodic cull
+ pc = PeriodicCallback(cull, 1e3 * options.cull_every)
+ pc.start()
+ try:
+ loop.start()
+ except KeyboardInterrupt:
+ pass
diff --git a/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
new file mode 100755
index 00000000..e72edabc
--- /dev/null
+++ b/applications/jupyterhub/deploy/resources/hub/jupyterhub_config.py
@@ -0,0 +1,521 @@
+import os
+import re
+import sys
+import logging
+
+from tornado.httpclient import AsyncHTTPClient
+from kubernetes import client
+from jupyterhub.utils import url_path_join
+
+try:
+ from harness_jupyter.jupyterhub import harness_hub
+ harness_hub() # activates harness hooks on jupyterhub
+except Exception as e:
+ logging.error("could not import harness_jupyter", exc_info=True)
+
+
+# Make sure that modules placed in the same directory as the jupyterhub config are added to the pythonpath
+configuration_directory = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, configuration_directory)
+
+from z2jh import get_config, set_config_if_not_none
+
+print('Base url is', c.JupyterHub.get('base_url', '/'))
+
+# Configure JupyterHub to use the curl backend for making HTTP requests,
+# rather than the pure-python implementations. The default one starts
+# being too slow to make a large number of requests to the proxy API
+# at the rate required.
+AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
+
+c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'
+
+# Connect to a proxy running in a different pod
+c.ConfigurableHTTPProxy.api_url = 'http://{}:{}'.format(os.environ['PROXY_API_SERVICE_HOST'], int(os.environ['PROXY_API_SERVICE_PORT']))
+c.ConfigurableHTTPProxy.should_start = False
+
+# Do not shut down user pods when hub is restarted
+c.JupyterHub.cleanup_servers = False
+
+# Check that the proxy has routes appropriately setup
+c.JupyterHub.last_activity_interval = 60
+
+# Don't wait at all before redirecting a spawning user to the progress page
+c.JupyterHub.tornado_settings = {
+ 'slow_spawn_timeout': 0,
+}
+
+
+def camelCaseify(s):
+ """convert snake_case to camelCase
+
+ For the common case where some_value is set from someValue
+ so we don't have to specify the name twice.
+ """
+ return re.sub(r"_([a-z])", lambda m: m.group(1).upper(), s)
+
+
+# configure the hub db connection
+db_type = get_config('hub.db.type')
+if db_type == 'sqlite-pvc':
+ c.JupyterHub.db_url = "sqlite:///jupyterhub.sqlite"
+elif db_type == "sqlite-memory":
+ c.JupyterHub.db_url = "sqlite://"
+else:
+ set_config_if_not_none(c.JupyterHub, "db_url", "hub.db.url")
+
+
+for trait, cfg_key in (
+ # Max number of servers that can be spawning at any one time
+ ('concurrent_spawn_limit', None),
+ # Max number of servers to be running at one time
+ ('active_server_limit', None),
+ # base url prefix
+ ('base_url', None),
+ ('allow_named_servers', None),
+ ('named_server_limit_per_user', None),
+ ('authenticate_prometheus', None),
+ ('redirect_to_server', None),
+ ('shutdown_on_logout', None),
+ ('template_paths', None),
+ ('template_vars', None),
+):
+ if cfg_key is None:
+ cfg_key = camelCaseify(trait)
+ set_config_if_not_none(c.JupyterHub, trait, 'hub.' + cfg_key)
+
+c.JupyterHub.ip = os.environ['PROXY_PUBLIC_SERVICE_HOST']
+c.JupyterHub.port = int(os.environ['PROXY_PUBLIC_SERVICE_PORT'])
+
+# the hub should listen on all interfaces, so the proxy can access it
+c.JupyterHub.hub_ip = '0.0.0.0'
+
+# implement common labels
+# this duplicates the jupyterhub.commonLabels helper
+common_labels = c.KubeSpawner.common_labels = {}
+common_labels['app'] = get_config(
+ "nameOverride",
+ default=get_config("Chart.Name", "jupyterhub"),
+)
+common_labels['heritage'] = "jupyterhub"
+chart_name = get_config('Chart.Name')
+chart_version = get_config('Chart.Version')
+if chart_name and chart_version:
+ common_labels['chart'] = "{}-{}".format(
+ chart_name, chart_version.replace('+', '_'),
+ )
+release = get_config('Release.Name')
+if release:
+ common_labels['release'] = release
+
+c.KubeSpawner.namespace = os.environ.get('POD_NAMESPACE', 'default')
+
+# Max number of consecutive failures before the Hub restarts itself
+# requires jupyterhub 0.9.2
+set_config_if_not_none(
+ c.Spawner,
+ 'consecutive_failure_limit',
+ 'hub.consecutiveFailureLimit',
+)
+
+for trait, cfg_key in (
+ ('start_timeout', None),
+ ('image_pull_policy', 'image.pullPolicy'),
+ ('events_enabled', 'events'),
+ ('extra_labels', None),
+ ('extra_annotations', None),
+ ('uid', None),
+ ('fs_gid', None),
+ ('service_account', 'serviceAccountName'),
+ ('storage_extra_labels', 'storage.extraLabels'),
+ ('tolerations', 'extraTolerations'),
+ ('node_selector', None),
+ ('node_affinity_required', 'extraNodeAffinity.required'),
+ ('node_affinity_preferred', 'extraNodeAffinity.preferred'),
+ ('pod_affinity_required', 'extraPodAffinity.required'),
+ ('pod_affinity_preferred', 'extraPodAffinity.preferred'),
+ ('pod_anti_affinity_required', 'extraPodAntiAffinity.required'),
+ ('pod_anti_affinity_preferred', 'extraPodAntiAffinity.preferred'),
+ ('lifecycle_hooks', None),
+ ('init_containers', None),
+ ('extra_containers', None),
+ ('mem_limit', 'memory.limit'),
+ ('mem_guarantee', 'memory.guarantee'),
+ ('cpu_limit', 'cpu.limit'),
+ ('cpu_guarantee', 'cpu.guarantee'),
+ ('extra_resource_limits', 'extraResource.limits'),
+ ('extra_resource_guarantees', 'extraResource.guarantees'),
+ ('environment', 'extraEnv'),
+ ('profile_list', None),
+ ('extra_pod_config', None),
+):
+ if cfg_key is None:
+ cfg_key = camelCaseify(trait)
+ set_config_if_not_none(c.KubeSpawner, trait, 'singleuser.' + cfg_key)
+
+image = get_config("singleuser.image.name")
+if image:
+ tag = get_config("singleuser.image.tag")
+ if tag:
+ image = "{}:{}".format(image, tag)
+
+ c.KubeSpawner.image = image
+
+if get_config('singleuser.imagePullSecret.enabled'):
+ c.KubeSpawner.image_pull_secrets = 'singleuser-image-credentials'
+
+# scheduling:
+if get_config('scheduling.userScheduler.enabled'):
+ c.KubeSpawner.scheduler_name = os.environ['HELM_RELEASE_NAME'] + "-user-scheduler"
+if get_config('scheduling.podPriority.enabled'):
+ c.KubeSpawner.priority_class_name = os.environ['HELM_RELEASE_NAME'] + "-default-priority"
+
+# add node-purpose affinity
+match_node_purpose = get_config('scheduling.userPods.nodeAffinity.matchNodePurpose')
+if match_node_purpose:
+ node_selector = dict(
+ matchExpressions=[
+ dict(
+ key="hub.jupyter.org/node-purpose",
+ operator="In",
+ values=["user"],
+ )
+ ],
+ )
+ if match_node_purpose == 'prefer':
+ c.KubeSpawner.node_affinity_preferred.append(
+ dict(
+ weight=100,
+ preference=node_selector,
+ ),
+ )
+ elif match_node_purpose == 'require':
+ c.KubeSpawner.node_affinity_required.append(node_selector)
+ elif match_node_purpose == 'ignore':
+ pass
+ else:
+ raise ValueError("Unrecognized value for matchNodePurpose: %r" % match_node_purpose)
+
+# add dedicated-node toleration
+for key in (
+ 'hub.jupyter.org/dedicated',
+ # workaround GKE not supporting / in initial node taints
+ 'hub.jupyter.org_dedicated',
+):
+ c.KubeSpawner.tolerations.append(
+ dict(
+ key=key,
+ operator='Equal',
+ value='user',
+ effect='NoSchedule',
+ )
+ )
+
+# Configure dynamically provisioning pvc
+storage_type = get_config('singleuser.storage.type')
+
+if storage_type == 'dynamic':
+ pvc_name_template = get_config('singleuser.storage.dynamic.pvcNameTemplate')
+
+    # TODO Here we can configure a custom volume depending on which workspace we want to load (or switch to static storage if we want to initialize the volume inside the volume manager)
+ c.KubeSpawner.pvc_name_template = pvc_name_template
+ volume_name_template = get_config('singleuser.storage.dynamic.volumeNameTemplate')
+ c.KubeSpawner.storage_pvc_ensure = True
+ set_config_if_not_none(c.KubeSpawner, 'storage_class', 'singleuser.storage.dynamic.storageClass')
+ set_config_if_not_none(c.KubeSpawner, 'storage_access_modes', 'singleuser.storage.dynamic.storageAccessModes')
+ set_config_if_not_none(c.KubeSpawner, 'storage_capacity', 'singleuser.storage.capacity')
+
+ # Add volumes to singleuser pods
+ c.KubeSpawner.volumes = [
+ {
+ 'name': volume_name_template,
+ 'persistentVolumeClaim': {
+ 'claimName': pvc_name_template,
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "1Mi"
+ }
+ }
+ }
+
+ }
+ }
+ ]
+ c.KubeSpawner.volume_mounts = [
+ {
+ 'mountPath': get_config('singleuser.storage.homeMountPath'),
+ 'name': volume_name_template
+ }
+ ]
+elif storage_type == 'static':
+ pvc_claim_name = get_config('singleuser.storage.static.pvcName')
+ c.KubeSpawner.volumes = [{
+ 'name': 'home',
+ 'persistentVolumeClaim': {
+ 'claimName': 'accounts-db'
+ }
+ }]
+
+ c.KubeSpawner.volume_mounts = [{
+ 'mountPath': get_config('singleuser.storage.homeMountPath'),
+ 'name': 'home',
+ 'subPath': get_config('singleuser.storage.static.subPath')
+ }]
+
+c.KubeSpawner.volumes.extend(get_config('singleuser.storage.extraVolumes', []))
+c.KubeSpawner.volume_mounts.extend(get_config('singleuser.storage.extraVolumeMounts', []))
+
+# Gives spawned containers access to the API of the hub
+c.JupyterHub.hub_connect_ip = os.environ['HUB_SERVICE_HOST']
+c.JupyterHub.hub_connect_port = int(os.environ['HUB_SERVICE_PORT'])
+
+# Allow switching authenticators easily
+auth_type = get_config('auth.type')
+email_domain = 'local'
+
+common_oauth_traits = (
+ ('client_id', None),
+ ('client_secret', None),
+ ('oauth_callback_url', 'callbackUrl'),
+)
+
+if auth_type == 'google':
+ c.JupyterHub.authenticator_class = 'oauthenticator.GoogleOAuthenticator'
+ for trait, cfg_key in common_oauth_traits + (
+ ('hosted_domain', None),
+ ('login_service', None),
+ ):
+ if cfg_key is None:
+ cfg_key = camelCaseify(trait)
+ set_config_if_not_none(c.GoogleOAuthenticator, trait, 'auth.google.' + cfg_key)
+ email_domain = get_config('auth.google.hostedDomain')
+elif auth_type == 'github':
+ c.JupyterHub.authenticator_class = 'oauthenticator.github.GitHubOAuthenticator'
+ for trait, cfg_key in common_oauth_traits + (
+ ('github_organization_whitelist', 'orgWhitelist'),
+ ):
+ if cfg_key is None:
+ cfg_key = camelCaseify(trait)
+ set_config_if_not_none(c.GitHubOAuthenticator, trait, 'auth.github.' + cfg_key)
+elif auth_type == 'cilogon':
+ c.JupyterHub.authenticator_class = 'oauthenticator.CILogonOAuthenticator'
+ for trait, cfg_key in common_oauth_traits:
+ if cfg_key is None:
+ cfg_key = camelCaseify(trait)
+ set_config_if_not_none(c.CILogonOAuthenticator, trait, 'auth.cilogon.' + cfg_key)
+elif auth_type == 'gitlab':
+ c.JupyterHub.authenticator_class = 'oauthenticator.gitlab.GitLabOAuthenticator'
+ for trait, cfg_key in common_oauth_traits + (
+ ('gitlab_group_whitelist', None),
+ ('gitlab_project_id_whitelist', None),
+ ('gitlab_url', None),
+ ):
+ if cfg_key is None:
+ cfg_key = camelCaseify(trait)
+ set_config_if_not_none(c.GitLabOAuthenticator, trait, 'auth.gitlab.' + cfg_key)
+elif auth_type == 'azuread':
+ c.JupyterHub.authenticator_class = 'oauthenticator.azuread.AzureAdOAuthenticator'
+ for trait, cfg_key in common_oauth_traits + (
+ ('tenant_id', None),
+ ('username_claim', None),
+ ):
+ if cfg_key is None:
+ cfg_key = camelCaseify(trait)
+
+ set_config_if_not_none(c.AzureAdOAuthenticator, trait, 'auth.azuread.' + cfg_key)
+elif auth_type == 'mediawiki':
+ c.JupyterHub.authenticator_class = 'oauthenticator.mediawiki.MWOAuthenticator'
+ for trait, cfg_key in common_oauth_traits + (
+ ('index_url', None),
+ ):
+ if cfg_key is None:
+ cfg_key = camelCaseify(trait)
+ set_config_if_not_none(c.MWOAuthenticator, trait, 'auth.mediawiki.' + cfg_key)
+elif auth_type == 'globus':
+ c.JupyterHub.authenticator_class = 'oauthenticator.globus.GlobusOAuthenticator'
+ for trait, cfg_key in common_oauth_traits + (
+ ('identity_provider', None),
+ ):
+ if cfg_key is None:
+ cfg_key = camelCaseify(trait)
+ set_config_if_not_none(c.GlobusOAuthenticator, trait, 'auth.globus.' + cfg_key)
+elif auth_type == 'hmac':
+ c.JupyterHub.authenticator_class = 'hmacauthenticator.HMACAuthenticator'
+ c.HMACAuthenticator.secret_key = bytes.fromhex(get_config('auth.hmac.secretKey'))
+elif auth_type == 'dummy':
+ c.JupyterHub.authenticator_class = 'dummyauthenticator.DummyAuthenticator'
+ set_config_if_not_none(c.DummyAuthenticator, 'password', 'auth.dummy.password')
+elif auth_type == 'tmp':
+ c.JupyterHub.authenticator_class = 'tmpauthenticator.TmpAuthenticator'
+elif auth_type == 'ch':
+ c.JupyterHub.authenticator_class = 'chauthenticator.CloudHarnessAuthenticator'
+elif auth_type == 'lti':
+ c.JupyterHub.authenticator_class = 'ltiauthenticator.LTIAuthenticator'
+ set_config_if_not_none(c.LTIAuthenticator, 'consumers', 'auth.lti.consumers')
+elif auth_type == 'ldap':
+ c.JupyterHub.authenticator_class = 'ldapauthenticator.LDAPAuthenticator'
+ c.LDAPAuthenticator.server_address = get_config('auth.ldap.server.address')
+ set_config_if_not_none(c.LDAPAuthenticator, 'server_port', 'auth.ldap.server.port')
+ set_config_if_not_none(c.LDAPAuthenticator, 'use_ssl', 'auth.ldap.server.ssl')
+ set_config_if_not_none(c.LDAPAuthenticator, 'allowed_groups', 'auth.ldap.allowedGroups')
+ set_config_if_not_none(c.LDAPAuthenticator, 'bind_dn_template', 'auth.ldap.dn.templates')
+ set_config_if_not_none(c.LDAPAuthenticator, 'lookup_dn', 'auth.ldap.dn.lookup')
+ set_config_if_not_none(c.LDAPAuthenticator, 'lookup_dn_search_filter', 'auth.ldap.dn.search.filter')
+ set_config_if_not_none(c.LDAPAuthenticator, 'lookup_dn_search_user', 'auth.ldap.dn.search.user')
+ set_config_if_not_none(c.LDAPAuthenticator, 'lookup_dn_search_password', 'auth.ldap.dn.search.password')
+ set_config_if_not_none(c.LDAPAuthenticator, 'lookup_dn_user_dn_attribute', 'auth.ldap.dn.user.dnAttribute')
+ set_config_if_not_none(c.LDAPAuthenticator, 'escape_userdn', 'auth.ldap.dn.user.escape')
+ set_config_if_not_none(c.LDAPAuthenticator, 'valid_username_regex', 'auth.ldap.dn.user.validRegex')
+ set_config_if_not_none(c.LDAPAuthenticator, 'user_search_base', 'auth.ldap.dn.user.searchBase')
+ set_config_if_not_none(c.LDAPAuthenticator, 'user_attribute', 'auth.ldap.dn.user.attribute')
+elif auth_type == 'custom':
+ # full_class_name looks like "myauthenticator.MyAuthenticator".
+    # To create a docker image with this class available, you can just have the
+ # following Dockerfile:
+ # FROM jupyterhub/k8s-hub:v0.4
+ # RUN pip3 install myauthenticator
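+    # As an illustration only (the config key below is hypothetical, not part of
+    # the chart's documented values), the matching values.yaml fragment could be:
+    #   auth:
+    #     type: custom
+    #     custom:
+    #       className: myauthenticator.MyAuthenticator
+    #       config:
+    #         some_option: "some value"
+    # Keys under auth.custom.config are applied to the authenticator's traitlets
+    # config via the update() call below.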
+ full_class_name = get_config('auth.custom.className')
+ c.JupyterHub.authenticator_class = full_class_name
+ auth_class_name = full_class_name.rsplit('.', 1)[-1]
+ auth_config = c[auth_class_name]
+ auth_config.update(get_config('auth.custom.config') or {})
+else:
+ raise ValueError("Unhandled auth type: %r" % auth_type)
+
+set_config_if_not_none(c.OAuthenticator, 'scope', 'auth.scopes')
+
+set_config_if_not_none(c.Authenticator, 'enable_auth_state', 'auth.state.enabled')
+
+# Enable admins to access user servers
+set_config_if_not_none(c.JupyterHub, 'admin_access', 'auth.admin.access')
+set_config_if_not_none(c.Authenticator, 'admin_users', 'auth.admin.users')
+set_config_if_not_none(c.Authenticator, 'whitelist', 'auth.whitelist.users')
+
+c.JupyterHub.services = []
+
+if get_config('cull.enabled', False):
+ cull_cmd = [
+ 'python3',
+ '/etc/jupyterhub/cull_idle_servers.py',
+ ]
+ base_url = c.JupyterHub.get('base_url', '/')
+ cull_cmd.append(
+ '--url=http://127.0.0.1:8081' + url_path_join(base_url, 'hub/api')
+ )
+
+ cull_timeout = get_config('cull.timeout')
+ if cull_timeout:
+ cull_cmd.append('--timeout=%s' % cull_timeout)
+
+ cull_every = get_config('cull.every')
+ if cull_every:
+ cull_cmd.append('--cull-every=%s' % cull_every)
+
+ cull_concurrency = get_config('cull.concurrency')
+ if cull_concurrency:
+ cull_cmd.append('--concurrency=%s' % cull_concurrency)
+
+ if get_config('cull.users'):
+ cull_cmd.append('--cull-users')
+
+ if get_config('cull.removeNamedServers'):
+ cull_cmd.append('--remove-named-servers')
+
+ cull_max_age = get_config('cull.maxAge')
+ if cull_max_age:
+ cull_cmd.append('--max-age=%s' % cull_max_age)
+
+ c.JupyterHub.services.append({
+ 'name': 'cull-idle',
+ 'admin': True,
+ 'command': cull_cmd,
+ })
+
+for name, service in get_config('hub.services', {}).items():
+    # jupyterhub.services is a list of dicts, but
+    # in the helm chart it is a dict of dicts for easier config merging
+    # (a sketch of the mapping follows after this loop)
+ service.setdefault('name', name)
+ # handle camelCase->snake_case of api_token
+ api_token = service.pop('apiToken', None)
+ if api_token:
+ service['api_token'] = api_token
+ c.JupyterHub.services.append(service)
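+# Sketch of the mapping above (the service name, url and token are made up):
+#   hub:
+#     services:
+#       my-service:
+#         url: http://my-service:9000
+#         apiToken: "<token>"
+# becomes an entry in c.JupyterHub.services:
+#   {'name': 'my-service', 'url': 'http://my-service:9000', 'api_token': '<token>'}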
+
+
+set_config_if_not_none(c.Spawner, 'cmd', 'singleuser.cmd')
+set_config_if_not_none(c.Spawner, 'default_url', 'singleuser.defaultUrl')
+
+cloud_metadata = get_config('singleuser.cloudMetadata', {})
+
+if not cloud_metadata.get('enabled', False):
+ # Use iptables to block access to cloud metadata by default
+ network_tools_image_name = get_config('singleuser.networkTools.image.name')
+ network_tools_image_tag = get_config('singleuser.networkTools.image.tag')
+ ip_block_container = client.V1Container(
+ name="block-cloud-metadata",
+ image=f"{network_tools_image_name}:{network_tools_image_tag}",
+ command=[
+ 'iptables',
+ '-A', 'OUTPUT',
+ '-d', cloud_metadata.get('ip', '169.254.169.254'),
+ '-j', 'DROP'
+ ],
+ security_context=client.V1SecurityContext(
+ privileged=True,
+ run_as_user=0,
+ capabilities=client.V1Capabilities(add=['NET_ADMIN'])
+ )
+ )
+
+ c.KubeSpawner.init_containers.append(ip_block_container)
+
+
+if get_config('debug.enabled', False):
+ c.JupyterHub.log_level = 'DEBUG'
+ c.Spawner.debug = True
+
+
+extra_config = get_config('hub.extraConfig', {})
+if isinstance(extra_config, str):
+ from textwrap import indent, dedent
+ msg = dedent(
+ """
+ hub.extraConfig should be a dict of strings,
+ but found a single string instead.
+
+ extraConfig as a single string is deprecated
+ as of the jupyterhub chart version 0.6.
+
+ The keys can be anything identifying the
+ block of extra configuration.
+
+ Try this instead:
+
+ hub:
+ extraConfig:
+ myConfig: |
+ {}
+
+ This configuration will still be loaded,
+ but you are encouraged to adopt the nested form
+ which enables easier merging of multiple extra configurations.
+ """
+ )
+ print(
+ msg.format(
+ indent(extra_config, ' ' * 10).lstrip()
+ ),
+ file=sys.stderr
+ )
+ extra_config = {'deprecated string': extra_config}
+
+for key, config_py in sorted(extra_config.items()):
+ print("Loading extra config: %s" % key)
+ exec(config_py)
+
+c.apps = get_config('apps')
+c.registry = get_config('registry')
\ No newline at end of file
diff --git a/applications/jupyterhub/deploy/resources/hub/z2jh.py b/applications/jupyterhub/deploy/resources/hub/z2jh.py
new file mode 100755
index 00000000..c5a2f37e
--- /dev/null
+++ b/applications/jupyterhub/deploy/resources/hub/z2jh.py
@@ -0,0 +1,86 @@
+"""
+Utility methods for use in jupyterhub_config.py and dynamic subconfigs.
+
+Methods here can be imported by extraConfig in values.yaml
+"""
+from collections.abc import Mapping
+from functools import lru_cache
+import os
+
+import yaml
+
+
+# memoize so we only load config once
+@lru_cache()
+def _load_config():
+ """Load configuration from disk
+
+ Memoized to only load once
+ """
+ cfg = {}
+ for source in ('config', 'secret'):
+ path = f"/etc/jupyterhub/{source}/values.yaml"
+ if os.path.exists(path):
+ print(f"Loading {path}")
+ with open(path) as f:
+ values = yaml.safe_load(f)
+ cfg = _merge_dictionaries(cfg, values)
+ else:
+ print(f"No config at {path}")
+
+ path = f"/etc/jupyterhub/config/allvalues.yaml"
+ if os.path.exists(path):
+ print("Loading global CloudHarness config")
+ with open(path) as f:
+ values = yaml.safe_load(f)
+ cfg = _merge_dictionaries(cfg, values)
+ return cfg
+
+
+def _merge_dictionaries(a, b):
+ """Merge two dictionaries recursively.
+
+ Simplified From https://stackoverflow.com/a/7205107
+ """
+ merged = a.copy()
+ for key in b:
+ if key in a:
+ if isinstance(a[key], Mapping) and isinstance(b[key], Mapping):
+ merged[key] = _merge_dictionaries(a[key], b[key])
+ else:
+ merged[key] = b[key]
+ else:
+ merged[key] = b[key]
+ return merged
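+# A minimal sketch of the merge behaviour (illustrative values only):
+#   _merge_dictionaries({'a': {'x': 1}, 'b': 2}, {'a': {'y': 3}, 'b': 4})
+#   returns {'a': {'x': 1, 'y': 3}, 'b': 4}
+# i.e. nested mappings are merged recursively and scalar values from b win.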
+
+
+def get_config(key, default=None):
+ """
+ Find a config item of a given name & return it
+
+ Parses everything as YAML, so lists and dicts are available too
+
+ get_config("a.b.c") returns config['a']['b']['c']
+ """
+ value = _load_config()
+ # resolve path in yaml
+ for level in key.split('.'):
+ if not isinstance(value, dict):
+ # a parent is a scalar or null,
+ # can't resolve full path
+ return default
+ if level not in value:
+ return default
+ else:
+ value = value[level]
+ return value
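+# Example usage (hedged; the key only resolves if present in the mounted values):
+#   get_config('auth.admin.users', [])  # -> config['auth']['admin']['users'] or []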
+
+
+def set_config_if_not_none(cparent, name, key):
+ """
+ Find a config item of a given name, set the corresponding Jupyter
+ configuration item if not None
+ """
+ data = get_config(key)
+ if data is not None:
+ setattr(cparent, name, data)
diff --git a/applications/jupyterhub/deploy/resources/userscheduler-defaultpolicy.yaml b/applications/jupyterhub/deploy/resources/userscheduler-defaultpolicy.yaml
new file mode 100755
index 00000000..68eac001
--- /dev/null
+++ b/applications/jupyterhub/deploy/resources/userscheduler-defaultpolicy.yaml
@@ -0,0 +1,75 @@
+{
+ "kind": "Policy",
+ "apiVersion": "v1",
+ "predicates": [
+ { "name": "PodFitsResources" },
+ { "name": "HostName" },
+ { "name": "PodFitsHostPorts" },
+ { "name": "MatchNodeSelector" },
+ { "name": "NoDiskConflict" },
+ { "name": "PodToleratesNodeTaints" },
+ { "name": "MaxEBSVolumeCount" },
+ { "name": "MaxGCEPDVolumeCount" },
+ { "name": "MaxAzureDiskVolumeCount" },
+ { "name": "CheckVolumeBinding" },
+ { "name": "NoVolumeZoneConflict" },
+ { "name": "MatchInterPodAffinity" }
+ ],
+ "priorities": [
+ { "name": "NodePreferAvoidPodsPriority", "weight": 161051 },
+ { "name": "NodeAffinityPriority", "weight": 14641 },
+ { "name": "InterPodAffinityPriority", "weight": 1331 },
+ { "name": "MostRequestedPriority", "weight": 121 },
+ { "name": "ImageLocalityPriority", "weight": 11}
+ ],
+ "hardPodAffinitySymmetricWeight" : 100,
+ "alwaysCheckAllPredicates" : false
+}
+
+# # Notes about ranges
+# ImageLocalityPriority - ranges from 0-10 * 11
+# MostRequestedPriority - ranges from 0-10 * 11^2
+# InterPodAffinityPriority - ranges from 0-1 * 11^3 (i guess)
+# NodeAffinityPriority - ranges from 0-1 * 11^4 (i guess)
+# NodePreferAvoidPodsPriority - ranges from 0-1 * 11^5 (i guess)
+
+# # Notes about the GeneralPredicates
+# The following predicates were not found by kube-scheduler 1.11.1-beta.0
+# { "name": "CheckNodePIDPressure" },
+# { "name": "CheckNodeUnschedulable" },
+# { "name": "CheckNodeCondition" },
+# { "name": "General" },
+# { "name": "PodToleratesNodeNoExecuteTaints" },
+# { "name": "CheckNodeMemoryPressure" },
+# { "name": "CheckNodeDiskPressure" },
+
+# # Notes about the priorities
+# NodePreferAvoidPodsPriority: What does this really mean?
+# HardPodAffinitySymmetricWeight: "It represents the weight of implicit
+# PreferredDuringScheduling affinity rule." - preferred node affinity or preferred
+# pod/anti-pod affinity or those affinities in general? How does this relate to
+# the InterPodAffinityPriority and NodeAffinityPriority?
+
+# AlwaysCheckAllPredicates: scheduler checks all the configured predicates even
+# after one or more of them fails.
+
+# GeneralPredicates checks whether noncriticalPredicates and EssentialPredicates
+# pass. noncriticalPredicates are the predicates that only non-critical pods need,
+# and EssentialPredicates are the predicates that all pods, including critical
+# pods, need.
+
+# MostRequestedPriority: Uses the default MostRequestedPriorityMap, a priority
+# function that favors nodes with the most requested resources. It calculates
+# the percentage of memory and CPU requested by pods scheduled on the node, and
+# prioritizes based on the maximum of the average of the fraction of requested to
+# capacity.
+
+# Details: (cpu(10 * sum(requested) / capacity) + memory(10 * sum(requested) /
+# capacity)) / 2
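+# Illustrative example with made-up numbers: a node with 60% of its CPU and 80%
+# of its memory requested scores (10*0.6 + 10*0.8) / 2 = 7.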
+
+# ImageLocalityPriorityMap is a priority function that favors nodes that already
+# have requested pod container's images. It will detect whether the requested
+# images are present on a node, and then calculate a score ranging from 0 to 10
+# based on the total size of those images. - If none of the images are present,
+# this node will be given the lowest priority. - If some of the images are present
+# on a node, the larger their sizes' sum, the higher the node's priority.
diff --git a/applications/jupyterhub/deploy/templates/NOTES.txt b/applications/jupyterhub/deploy/templates/NOTES.txt
new file mode 100755
index 00000000..0ceede94
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/NOTES.txt
@@ -0,0 +1,38 @@
+Thank you for installing JupyterHub!
+
+Your release is named {{ .Release.Name }} and installed into the namespace {{ .Release.Namespace }}.
+
+You can check whether the hub and proxy are ready by doing:
+
+ kubectl --namespace={{ .Release.Namespace }} get pod
+
+and watching for both of those pods to reach the 'Running' status.
+
+You can find the public IP of the JupyterHub by doing:
+
+ kubectl --namespace={{ .Release.Namespace }} get svc proxy-public
+
+It might take a few minutes for it to appear!
+
+Note that this is still an alpha release! If you have questions, feel free to
+ 1. Read the guide at https://z2jh.jupyter.org
+ 2. Chat with us at https://gitter.im/jupyterhub/jupyterhub
+ 3. File issues at https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues
+
+{{- if .Values.apps.jupyterhub.hub.extraConfigMap }}
+
+DEPRECATION: hub.extraConfigMap is deprecated in jupyterhub chart 0.8.
+Use top-level `custom` instead:
+
+---
+custom:
+{{- (merge dict .Values.apps.jupyterhub.custom .Values.apps.jupyterhub.hub.extraConfigMap) | toYaml | nindent 2}}
+---
+{{- end }}
+
+{{- if and (not .Values.apps.jupyterhub.scheduling.podPriority.enabled) (and .Values.apps.jupyterhub.scheduling.userPlaceholder.enabled .Values.apps.jupyterhub.scheduling.userPlaceholder.replicas) }}
+
+WARNING: You are using user placeholders without pod priority enabled. Either
+enable pod priority or stop using the user placeholders to avoid wasting cloud
+resources.
+{{- end }}
\ No newline at end of file
diff --git a/applications/jupyterhub/deploy/templates/_helpers.tpl b/applications/jupyterhub/deploy/templates/_helpers.tpl
new file mode 100755
index 00000000..f06aedfa
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/_helpers.tpl
@@ -0,0 +1,269 @@
+{{- /*
+ ## About
+ This file contains helpers to systematically name, label and select Kubernetes
+ objects we define in the .yaml template files.
+
+
+ ## How helpers work
+  Helm helper functions are a good way to avoid repetition. They generate
+  output based on a single dictionary of input that we call the helper's
+  scope. In Helm, you access your current scope with a single
+  period (.).
+
+  When you ask a helper to render its content, you often forward the current
+ scope to the helper in order to allow it to access .Release.Name,
+ .Values.apps.jupyterhub.rbac.enabled and similar values.
+
+ #### Example - Passing the current scope
+ {{ include "jupyterhub.commonLabels" . }}
+
+ It would be possible to pass something specific instead of the current scope
+  (.), but that would make .Release.Name etc. inaccessible to the helper, which
+  is something we aim to avoid.
+
+ #### Example - Passing a new scope
+ {{ include "demo.bananaPancakes" (dict "pancakes" 5 "bananas" 3) }}
+
+  To let a helper access the current scope along with additional values, we have
+  opted to create a dictionary containing the additional values, which is then
+  populated with values from the current scope through the merge function.
+
+ #### Example - Passing a new scope augmented with the old
+ {{- $_ := merge (dict "appLabel" "kube-lego") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 6 }}
+
+ In this way, the code within the definition of `jupyterhub.matchLabels` will
+ be able to access .Release.Name and .appLabel.
+
+ NOTE:
+  The ordering of merge is crucial: the latter argument is merged into the
+  former. If you swapped the order you would modify the current scope,
+  risking unintentional behavior. Therefore, always put the fresh unreferenced
+ dictionary (dict "key1" "value1") first and the current scope (.) last.
+
+
+ ## Declared helpers
+ - appLabel |
+ - componentLabel |
+ - nameField | uses componentLabel
+ - commonLabels | uses appLabel
+ - labels | uses commonLabels
+ - matchLabels | uses labels
+ - podCullerSelector | uses matchLabels
+
+
+ ## Example usage
+ ```yaml
+ # Excerpt from proxy/autohttps/deployment.yaml
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: {{ include "jupyterhub.nameField" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ spec:
+ selector:
+ matchLabels:
+ {{- $_ := merge (dict "appLabel" "kube-lego") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "jupyterhub.labels" $_ | nindent 8 }}
+ hub.jupyter.org/network-access-proxy-http: "true"
+ ```
+
+ NOTE:
+ The "jupyterhub.matchLabels" and "jupyterhub.labels" is passed an augmented
+ scope that will influence the helpers' behavior. It get the current scope
+ "." but merged with a dictionary containing extra key/value pairs. In this
+ case the "." scope was merged with a small dictionary containing only one
+ key/value pair "appLabel: kube-lego". It is required for kube-lego to
+ function properly. It is a way to override the default app label's value.
+*/}}
+
+
+{{- /*
+ jupyterhub.appLabel:
+ Used by "jupyterhub.labels".
+*/}}
+{{- define "jupyterhub.appLabel" -}}
+{{ .Values.apps.jupyterhub.nameOverride | default .Chart.Name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+
+{{- /*
+ jupyterhub.componentLabel:
+ Used by "jupyterhub.labels" and "jupyterhub.nameField".
+
+ NOTE: The component label is determined by either...
+ - 1: The provided scope's .componentLabel
+ - 2: The template's filename if living in the root folder
+ - 3: The template parent folder's name
+ - : ...and is combined with .componentPrefix and .componentSuffix
+*/}}
+{{- define "jupyterhub.componentLabel" -}}
+{{- $file := .Template.Name | base | trimSuffix ".yaml" -}}
+{{- $parent := .Template.Name | dir | base | trimPrefix "templates" -}}
+{{- $component := .componentLabel | default $parent | default $file -}}
+{{- $component := print (.componentPrefix | default "") $component (.componentSuffix | default "") -}}
+{{ $component }}
+{{- end }}
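+{{- /*
+  Illustrative example (assumed paths, not an exhaustive spec): a template at
+  templates/hub/deployment.yaml gets component "hub" from its parent folder;
+  rendered with a scope merged with (dict "componentPrefix" "hook-") it would
+  yield "hook-hub".
+*/}}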
+
+
+{{- /*
+ jupyterhub.nameField:
+ Populates the name field's value.
+ NOTE: some name fields are limited to 63 characters by the DNS naming spec.
+
+ TODO:
+ - [ ] Set all name fields using this helper.
+ - [ ] Optionally prefix the release name based on some setting in
+ .Values.apps.jupyterhub to allow for multiple deployments within a single namespace.
+*/}}
+{{- define "jupyterhub.nameField" -}}
+{{- $name := print (.namePrefix | default "") (include "jupyterhub.componentLabel" .) (.nameSuffix | default "") -}}
+{{ printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+
+{{- /*
+ jupyterhub.commonLabels:
+ Foundation for "jupyterhub.labels".
+ Provides labels: app, release, (chart and heritage).
+*/}}
+{{- define "jupyterhub.commonLabels" -}}
+app: {{ .appLabel | default (include "jupyterhub.appLabel" .) }}
+release: {{ .Release.Name }}
+{{- if not .matchLabels }}
+chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+heritage: {{ .heritageLabel | default .Release.Service }}
+{{- end }}
+{{- end }}
+
+
+{{- /*
+ jupyterhub.labels:
+ Provides labels: component, app, release, (chart and heritage).
+*/}}
+{{- define "jupyterhub.labels" -}}
+component: {{ include "jupyterhub.componentLabel" . }}
+{{ include "jupyterhub.commonLabels" . }}
+{{- end }}
+
+
+{{- /*
+ jupyterhub.matchLabels:
+ Used to provide pod selection labels: component, app, release.
+*/}}
+{{- define "jupyterhub.matchLabels" -}}
+{{- $_ := merge (dict "matchLabels" true) . -}}
+{{ include "jupyterhub.labels" $_ }}
+{{- end }}
+
+
+{{- /*
+ jupyterhub.dockersingleuserconfigjson:
+  Creates a base64 encoded docker registry json blob for use in an image pull
+  secret, just like the `kubectl create secret docker-registry` command does
+  for the generated secret's data.dockerconfigjson field. The output is
+  verified to be exactly the same even if you have a password spanning
+  multiple lines, as you may need with a private GCR registry.
+
+ - https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+*/}}
+{{- define "jupyterhub.dockersingleuserconfigjson" -}}
+{{ include "jupyterhub.dockersingleuserconfigjson.yaml" . | b64enc }}
+{{- end }}
+
+{{- define "jupyterhub.dockersingleuserconfigjson.yaml" -}}
+{{- with .Values.apps.jupyterhub.singleuser.imagePullSecret -}}
+{
+ "auths": {
+ {{ .registry | default "https://index.docker.io/v1/" | quote }}: {
+ "username": {{ .username | quote }},
+ "password": {{ .password | quote }},
+ {{- if .email }}
+ "email": {{ .email | quote }},
+ {{- end }}
+ "auth": {{ (print .username ":" .password) | b64enc | quote }}
+ }
+ }
+}
+{{- end }}
+{{- end }}
+
+{{- /*
+ jupyterhub.dockerhubconfigjson:
+  Creates a base64 encoded docker registry json blob for use in an image pull
+  secret, just like the `kubectl create secret docker-registry` command does
+  for the generated secret's data.dockerconfigjson field. The output is
+  verified to be exactly the same even if you have a password spanning
+  multiple lines, as you may need with a private GCR registry.
+
+ - https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+*/}}
+{{- define "jupyterhub.dockerhubconfigjson" -}}
+{{ include "jupyterhub.dockerhubconfigjson.yaml" . | b64enc }}
+{{- end }}
+
+{{- define "jupyterhub.dockerhubconfigjson.yaml" -}}
+{{- with .Values.apps.jupyterhub.hub.imagePullSecret -}}
+{
+ "auths": {
+ {{ .registry | default "https://index.docker.io/v1/" | quote }}: {
+ "username": {{ .username | quote }},
+ "password": {{ .password | quote }},
+ {{- if .email }}
+ "email": {{ .email | quote }},
+ {{- end }}
+ "auth": {{ (print .username ":" .password) | b64enc | quote }}
+ }
+ }
+}
+{{- end }}
+{{- end }}
+
+{{- /*
+ jupyterhub.resources:
+ The resource request of a singleuser.
+*/}}
+{{- define "jupyterhub.resources" -}}
+{{- $r1 := .Values.apps.jupyterhub.singleuser.cpu.guarantee -}}
+{{- $r2 := .Values.apps.jupyterhub.singleuser.memory.guarantee -}}
+{{- $r3 := .Values.apps.jupyterhub.singleuser.extraResource.guarantees -}}
+{{- $r := or $r1 $r2 $r3 -}}
+{{- $l1 := .Values.apps.jupyterhub.singleuser.cpu.limit -}}
+{{- $l2 := .Values.apps.jupyterhub.singleuser.memory.limit -}}
+{{- $l3 := .Values.apps.jupyterhub.singleuser.extraResource.limits -}}
+{{- $l := or $l1 $l2 $l3 -}}
+{{- if $r -}}
+requests:
+ {{- if $r1 }}
+ cpu: {{ .Values.apps.jupyterhub.singleuser.cpu.guarantee }}
+ {{- end }}
+ {{- if $r2 }}
+ memory: {{ .Values.apps.jupyterhub.singleuser.memory.guarantee }}
+ {{- end }}
+ {{- if $r3 }}
+ {{- range $key, $value := .Values.apps.jupyterhub.singleuser.extraResource.guarantees }}
+ {{ $key | quote }}: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+
+{{- if $l }}
+limits:
+ {{- if $l1 }}
+ cpu: {{ .Values.apps.jupyterhub.singleuser.cpu.limit }}
+ {{- end }}
+ {{- if $l2 }}
+ memory: {{ .Values.apps.jupyterhub.singleuser.memory.limit }}
+ {{- end }}
+ {{- if $l3 }}
+ {{- range $key, $value := .Values.apps.jupyterhub.singleuser.extraResource.limits }}
+ {{ $key | quote }}: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/hub/configmap.yaml b/applications/jupyterhub/deploy/templates/hub/configmap.yaml
new file mode 100755
index 00000000..67b45375
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/hub/configmap.yaml
@@ -0,0 +1,42 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: hub-config
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+data:
+ allvalues.yaml: |
+ {{- .Values | toYaml | nindent 4 }}
+{{- $values := pick .Values.apps.jupyterhub "auth" "cull" "custom" "debug" "hub" "scheduling" "singleuser" }}
+{{- /* trim secret values from .Values.apps.jupyterhub. Update here if new secrets are added! */ -}}
+{{- /* make a copy of .Values.apps.jupyterhub.auth to avoid modifying the original */ -}}
+{{- $_ := set $values "auth" (merge dict .Values.apps.jupyterhub.auth) }}
+{{- $_ := set $.Values.apps.jupyterhub.auth "state" (omit $.Values.apps.jupyterhub.auth.state "cryptoKey") }}
+{{- range $key, $auth := .Values.apps.jupyterhub.auth }}
+ {{- if typeIs "map[string]interface {}" $auth }}
+ {{- if (or $auth.clientSecret $auth.password) }}
+ {{- $_ := set $.Values.apps.jupyterhub.auth $key (omit $auth "clientSecret" "password") }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+{{- $_ := set $values "hub" (omit $.Values.apps.jupyterhub.hub "cookieSecret" "extraEnv" "extraConfigMap") -}}
+{{- $_ := set $.Values.apps.jupyterhub.hub "services" dict }}
+{{- range $key, $service := .Values.apps.jupyterhub.hub.services }}
+ {{- if $service.apiToken }}
+ {{- $_ := set $.Values.apps.jupyterhub.hub.services $key (omit $service "apiToken") }}
+ {{- end }}
+{{- end }}
+{{- /* copy .Values.apps.jupyterhub.singleuser */ -}}
+{{- $_ := set $values "singleuser" (omit .Values.apps.jupyterhub.singleuser "imagePullSecret") }}
+{{- $_ := set $.Values.apps.jupyterhub.singleuser "imagePullSecret" (omit .Values.apps.jupyterhub.singleuser.imagePullSecret "password") }}
+{{- /* preserve behavior of deprecated hub.extraConfigMap */ -}}
+{{- $_ := set $values "custom" (merge dict $.Values.apps.jupyterhub.custom .Values.apps.jupyterhub.hub.extraConfigMap) }}
+{{- /* passthrough subset of Chart / Release */ -}}
+{{- $_ := set $values "Chart" (dict "Name" .Chart.Name "Version" .Chart.Version) }}
+{{- $_ := set $values "Release" (pick .Release "Name" "Namespace" "Service") }}
+ values.yaml: |
+ {{- $values | toYaml | nindent 4 }}
+
+ {{- /* Glob files to allow them to be mounted by the hub pod */ -}}
+ {{- /* key=filename: value=content */ -}}
+ {{- (.Files.Glob "resources/jupyterhub/hub/*").AsConfig | nindent 2 }}
diff --git a/applications/jupyterhub/deploy/templates/hub/deployment.yaml b/applications/jupyterhub/deploy/templates/hub/deployment.yaml
new file mode 100755
index 00000000..d4bdaf3e
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/hub/deployment.yaml
@@ -0,0 +1,237 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: hub
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ strategy:
+ {{- .Values.apps.jupyterhub.hub.deploymentStrategy | toYaml | trimSuffix "\n" | nindent 4 }}
+ template:
+ metadata:
+ labels:
+ {{- /* Changes here will cause the Deployment to restart the pods. */}}
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ hub.jupyter.org/network-access-proxy-api: "true"
+ hub.jupyter.org/network-access-proxy-http: "true"
+ hub.jupyter.org/network-access-singleuser: "true"
+ {{- if .Values.apps.jupyterhub.hub.labels }}
+ {{- .Values.apps.jupyterhub.hub.labels | toYaml | trimSuffix "\n" | nindent 8 }}
+ {{- end }}
+ annotations:
+ # This lets us autorestart when the secret changes!
+ checksum/config-map: {{ include (print .Template.BasePath "/jupyterhub/hub/configmap.yaml") . | sha256sum }}
+ checksum/secret: {{ include (print .Template.BasePath "/jupyterhub/hub/secret.yaml") . | sha256sum }}
+ {{- if .Values.apps.jupyterhub.hub.annotations }}
+ {{- .Values.apps.jupyterhub.hub.annotations | toYaml | trimSuffix "\n" | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+ priorityClassName: {{ .Release.Name }}-default-priority
+ {{- end }}
+ nodeSelector: {{ toJson .Values.apps.jupyterhub.hub.nodeSelector }}
+ {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
+ volumes:
+ - name: config
+ configMap:
+ name: hub-config
+ - name: secret
+ secret:
+ {{- if .Values.apps.jupyterhub.hub.existingSecret }}
+ secretName: {{ .Values.apps.jupyterhub.hub.existingSecret }}
+ {{- else }}
+ secretName: hub-secret
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.hub.extraVolumes }}
+ {{- .Values.apps.jupyterhub.hub.extraVolumes | toYaml | trimSuffix "\n" | nindent 8 }}
+ {{- end }}
+ {{- if eq .Values.apps.jupyterhub.hub.db.type "sqlite-pvc" }}
+ - name: hub-db-dir
+ persistentVolumeClaim:
+ claimName: hub-db-dir
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.rbac.enabled }}
+ serviceAccountName: hub
+ {{- end }}
+ securityContext:
+ fsGroup: {{ .Values.apps.jupyterhub.hub.fsGid }}
+ {{- if and .Values.registry.secret (contains .Values.registry.name .Values.apps.jupyterhub.image) }}
+ imagePullSecrets:
+ - name: {{ .Values.registry.secret }}
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.hub.initContainers }}
+ initContainers:
+ {{- .Values.apps.jupyterhub.hub.initContainers | toYaml | trimSuffix "\n" | nindent 8 }}
+ {{- end }}
+ containers:
+ {{- if .Values.apps.jupyterhub.hub.extraContainers }}
+ {{- .Values.apps.jupyterhub.hub.extraContainers | toYaml | trimSuffix "\n" | nindent 8 }}
+ {{- end }}
+ - name: hub
+ image: {{ .Values.apps.jupyterhub.image }}
+ command:
+ - jupyterhub
+ - --config
+ - /etc/jupyterhub/jupyterhub_config.py
+ {{- if .Values.apps.jupyterhub.debug.enabled }}
+ - --debug
+ {{- end }}
+ {{- /*
+ We want to do automatic upgrades for sqlite-pvc by default, but
+ allow users to opt out of that if they want. Users using their own
+          db need to 'opt in'. Go templates treat nil, "" and false as
+          'false', making this code complex. We can probably make this a
+ one-liner, but doing combinations of boolean vars in go templates is
+ very inelegant & hard to reason about.
+ */}}
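+          {{- /* Illustrative summary, not a default: hub.db.upgrade set to true
+               always adds --upgrade-db, false never does, and leaving it unset
+               (nil) adds it only when hub.db.type is "sqlite-pvc". */}}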
+ {{- $upgradeType := typeOf .Values.apps.jupyterhub.hub.db.upgrade }}
+ {{- if eq $upgradeType "bool" }}
+ {{- /* .Values.apps.jupyterhub.hub.db.upgrade has been explicitly set to true or false */}}
+ {{- if .Values.apps.jupyterhub.hub.db.upgrade }}
+ - --upgrade-db
+ {{- end }}
+          {{- else if eq $upgradeType "<nil>" }}
+ {{- /* .Values.apps.jupyterhub.hub.db.upgrade is nil */}}
+ {{- if eq .Values.apps.jupyterhub.hub.db.type "sqlite-pvc" }}
+ - --upgrade-db
+ {{- end }}
+ {{- end }}
+ volumeMounts:
+ - mountPath: /etc/jupyterhub/jupyterhub_config.py
+ subPath: jupyterhub_config.py
+ name: config
+ - mountPath: /etc/jupyterhub/z2jh.py
+ subPath: z2jh.py
+ name: config
+ - mountPath: /etc/jupyterhub/cull_idle_servers.py
+ subPath: cull_idle_servers.py
+ name: config
+ - mountPath: /etc/jupyterhub/config/
+ name: config
+ - mountPath: /etc/jupyterhub/secret/
+ name: secret
+ {{- if .Values.apps.jupyterhub.hub.extraVolumeMounts }}
+ {{- .Values.apps.jupyterhub.hub.extraVolumeMounts | toYaml | trimSuffix "\n" | nindent 12 }}
+ {{- end }}
+ {{- if eq .Values.apps.jupyterhub.hub.db.type "sqlite-pvc" }}
+ - mountPath: /srv/jupyterhub
+ name: hub-db-dir
+ {{- if .Values.apps.jupyterhub.hub.db.pvc.subPath }}
+ subPath: {{ .Values.apps.jupyterhub.hub.db.pvc.subPath | quote }}
+ {{- end }}
+ {{- end }}
+ resources:
+ {{- .Values.apps.jupyterhub.hub.resources | toYaml | trimSuffix "\n" | nindent 12 }}
+ {{- with .Values.apps.jupyterhub.hub.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ securityContext:
+ runAsUser: {{ .Values.apps.jupyterhub.hub.uid }}
+ # Don't allow any process to execute as root inside the container
+ allowPrivilegeEscalation: false
+ env:
+ - name: PYTHONUNBUFFERED
+ value: "1"
+ - name: HELM_RELEASE_NAME
+ value: {{ .Release.Name | quote }}
+ {{- if .Values.apps.jupyterhub.hub.cookieSecret }}
+ - name: JPY_COOKIE_SECRET
+ valueFrom:
+ secretKeyRef:
+ {{- if .Values.apps.jupyterhub.hub.existingSecret }}
+ name: {{ .Values.apps.jupyterhub.hub.existingSecret }}
+ {{- else }}
+ name: hub-secret
+ {{- end }}
+ key: hub.cookie-secret
+ {{- end }}
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONFIGPROXY_AUTH_TOKEN
+ valueFrom:
+ secretKeyRef:
+ {{- if .Values.apps.jupyterhub.hub.existingSecret }}
+ name: {{ .Values.apps.jupyterhub.hub.existingSecret }}
+ {{- else }}
+ name: hub-secret
+ {{- end }}
+ key: proxy.token
+ {{- if .Values.apps.jupyterhub.auth.state.enabled }}
+ - name: JUPYTERHUB_CRYPT_KEY
+ valueFrom:
+ secretKeyRef:
+ {{- if .Values.apps.jupyterhub.hub.existingSecret }}
+ name: {{ .Values.apps.jupyterhub.hub.existingSecret }}
+ {{- else }}
+ name: hub-secret
+ {{- end }}
+ key: auth.state.crypto-key
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.hub.db.password }}
+ {{- if eq .Values.apps.jupyterhub.hub.db.type "mysql" }}
+ - name: MYSQL_PWD
+ valueFrom:
+ secretKeyRef:
+ {{- if .Values.apps.jupyterhub.hub.existingSecret }}
+ name: {{ .Values.apps.jupyterhub.hub.existingSecret }}
+ {{- else }}
+ name: hub-secret
+ {{- end }}
+ key: hub.db.password
+ {{- else if eq .Values.apps.jupyterhub.hub.db.type "postgres" }}
+ - name: PGPASSWORD
+ valueFrom:
+ secretKeyRef:
+ {{- if .Values.apps.jupyterhub.hub.existingSecret }}
+ name: {{ .Values.apps.jupyterhub.hub.existingSecret }}
+ {{- else }}
+ name: hub-secret
+ {{- end }}
+ key: hub.db.password
+ {{- end }}
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.hub.extraEnv }}
+ {{- $extraEnvType := typeOf .Values.apps.jupyterhub.hub.extraEnv }}
+ {{- /* If we have a list, embed that here directly. This allows for complex configuration from configmap, downward API, etc. */}}
+ {{- if eq $extraEnvType "[]interface {}" }}
+ {{- .Values.apps.jupyterhub.hub.extraEnv | toYaml | trimSuffix "\n" | nindent 12 }}
+ {{- else if eq $extraEnvType "map[string]interface {}" }}
+ {{- /* If we have a map, treat those as key-value pairs. */}}
+ {{- range $key, $value := .Values.apps.jupyterhub.hub.extraEnv }}
+ - name: {{ $key | quote }}
+ value: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - containerPort: 8081
+ name: hub
+ {{- if .Values.apps.jupyterhub.hub.livenessProbe.enabled }}
+ # livenessProbe notes:
+ # We don't know how long hub database upgrades could take
+ # so having a liveness probe could be a bit risky unless we put
+          # an initialDelaySeconds value with a long enough margin for that
+ # to not be an issue. If it is too short, we could end up aborting
+ # database upgrades midway or ending up in an infinite restart
+ # loop.
+ livenessProbe:
+ initialDelaySeconds: {{ .Values.apps.jupyterhub.hub.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.apps.jupyterhub.hub.livenessProbe.periodSeconds }}
+ httpGet:
+ path: {{ .Values.apps.jupyterhub.hub.baseUrl | trimSuffix "/" }}/hub/health
+ port: hub
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.hub.readinessProbe.enabled }}
+ readinessProbe:
+ initialDelaySeconds: {{ .Values.apps.jupyterhub.hub.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.apps.jupyterhub.hub.readinessProbe.periodSeconds }}
+ httpGet:
+ path: {{ .Values.apps.jupyterhub.hub.baseUrl | trimSuffix "/" }}/hub/health
+ port: hub
+ {{- end }}
diff --git a/applications/jupyterhub/deploy/templates/hub/image-credentials-secret.yaml b/applications/jupyterhub/deploy/templates/hub/image-credentials-secret.yaml
new file mode 100755
index 00000000..c915b66d
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/hub/image-credentials-secret.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.apps.jupyterhub.hub.imagePullSecret.enabled }}
+kind: Secret
+apiVersion: v1
+metadata:
+ name: hub-image-credentials
+ labels:
+ {{- $_ := merge (dict "componentSuffix" "-image-credentials") . }}
+ {{- include "jupyterhub.labels" $_ | nindent 4 }}
+type: kubernetes.io/dockerconfigjson
+data:
+ .dockerconfigjson: {{ include "jupyterhub.dockerhubconfigjson" . }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/hub/netpol.yaml b/applications/jupyterhub/deploy/templates/hub/netpol.yaml
new file mode 100755
index 00000000..6c5b39c5
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/hub/netpol.yaml
@@ -0,0 +1,38 @@
+{{- if .Values.apps.jupyterhub.hub.networkPolicy.enabled -}}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: hub
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ podSelector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ policyTypes:
+ - Ingress
+ - Egress
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ hub.jupyter.org/network-access-hub: "true"
+ ports:
+ - protocol: TCP
+ port: 8081
+ {{- /* Useful if you want to give hub access to pods from other namespaces */}}
+  {{- if .Values.apps.jupyterhub.hub.networkPolicy.ingress }}
+  {{- .Values.apps.jupyterhub.hub.networkPolicy.ingress | toYaml | trimSuffix "\n" | nindent 4 }}
+ {{- end }}
+ egress:
+ {{- /*
+    The default is to allow all egress for the hub. If you want to restrict it,
+    the following egress is required:
+ - proxy:8001
+ - singleuser:8888
+ - Kubernetes api-server
+ */}}
+ {{- if .Values.apps.jupyterhub.hub.networkPolicy.egress }}
+ {{- .Values.apps.jupyterhub.hub.networkPolicy.egress | toYaml | trimSuffix "\n" | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/hub/pdb.yaml b/applications/jupyterhub/deploy/templates/hub/pdb.yaml
new file mode 100755
index 00000000..295b2437
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/hub/pdb.yaml
@@ -0,0 +1,13 @@
+{{- if .Values.apps.jupyterhub.hub.pdb.enabled -}}
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+ name: hub
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ minAvailable: {{ .Values.apps.jupyterhub.hub.pdb.minAvailable }}
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/hub/pvc.yaml b/applications/jupyterhub/deploy/templates/hub/pvc.yaml
new file mode 100755
index 00000000..58305936
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/hub/pvc.yaml
@@ -0,0 +1,25 @@
+{{- if eq .Values.apps.jupyterhub.hub.db.type "sqlite-pvc" -}}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: hub-db-dir
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ {{- if .Values.apps.jupyterhub.hub.db.pvc.annotations }}
+ annotations:
+ {{- .Values.apps.jupyterhub.hub.db.pvc.annotations | toYaml | trimSuffix "\n" | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if .Values.apps.jupyterhub.hub.db.pvc.selector }}
+ selector:
+ {{- .Values.apps.jupyterhub.hub.db.pvc.selector | toYaml | trimSuffix "\n" | nindent 4 }}
+ {{- end }}
+ {{- if typeIs "string" .Values.apps.jupyterhub.hub.db.pvc.storageClassName }}
+ storageClassName: {{ .Values.apps.jupyterhub.hub.db.pvc.storageClassName | quote }}
+ {{- end }}
+ accessModes:
+ {{- .Values.apps.jupyterhub.hub.db.pvc.accessModes | toYaml | trimSuffix "\n" | nindent 4 }}
+ resources:
+ requests:
+ storage: {{ .Values.apps.jupyterhub.hub.db.pvc.storage | quote }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/hub/rbac.yaml b/applications/jupyterhub/deploy/templates/hub/rbac.yaml
new file mode 100755
index 00000000..8bb935d6
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/hub/rbac.yaml
@@ -0,0 +1,37 @@
+{{- if .Values.apps.jupyterhub.rbac.enabled -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: hub
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: hub
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+rules:
+ - apiGroups: [""] # "" indicates the core API group
+ resources: ["pods", "persistentvolumeclaims"]
+ verbs: ["get", "watch", "list", "create", "delete"]
+ - apiGroups: [""] # "" indicates the core API group
+ resources: ["events"]
+ verbs: ["get", "watch", "list"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: hub
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+subjects:
+ - kind: ServiceAccount
+ name: hub
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: Role
+ name: hub
+ apiGroup: rbac.authorization.k8s.io
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/hub/secret.yaml b/applications/jupyterhub/deploy/templates/hub/secret.yaml
new file mode 100755
index 00000000..c22c155f
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/hub/secret.yaml
@@ -0,0 +1,37 @@
+{{- if not .Values.apps.jupyterhub.hub.existingSecret }}
+kind: Secret
+apiVersion: v1
+metadata:
+ name: hub-secret
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+type: Opaque
+data:
+ proxy.token: {{ (required "Proxy token must be a 32 byte random string generated with `openssl rand -hex 32`!" .Values.apps.jupyterhub.proxy.secretToken) | b64enc | quote }}
+ {{- if .Values.apps.jupyterhub.hub.cookieSecret }}
+ hub.cookie-secret: {{ .Values.apps.jupyterhub.hub.cookieSecret | b64enc | quote }}
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.hub.db.password }}
+ hub.db.password: {{ .Values.apps.jupyterhub.hub.db.password | b64enc | quote }}
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.auth.state.enabled }}
+ auth.state.crypto-key: {{ (required "Encryption key is required for auth state to be persisted!" .Values.apps.jupyterhub.auth.state.cryptoKey) | b64enc | quote }}
+ {{- end }}
+ {{- $values := dict "hub" dict }}
+ {{- /* pluck only needed secret values, preserving .Values.apps.jupyterhub.yaml structure */ -}}
+ {{- $_ := set $values "auth" dict }}
+ {{- range $key, $auth := .Values.apps.jupyterhub.auth }}
+ {{- if typeIs "map[string]interface {}" $auth }}
+ {{- if (or $auth.clientSecret $auth.password) }}
+ {{- $_ := set $.Values.apps.jupyterhub.auth $key (pick $auth "clientSecret" "password") }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- $_ := set $.Values.apps.jupyterhub.hub "services" dict }}
+ {{- range $key, $service := .Values.apps.jupyterhub.hub.services }}
+ {{- if $service.apiToken }}
+ {{- $_ := set $.Values.apps.jupyterhub.hub.services $key (pick $service "apiToken") }}
+ {{- end }}
+ {{- end }}
+ .Values.apps.jupyterhub.yaml: {{ $values | toYaml | b64enc | quote }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/hub/service.yaml b/applications/jupyterhub/deploy/templates/hub/service.yaml
new file mode 100755
index 00000000..e9565c48
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/hub/service.yaml
@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: hub
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ annotations:
+ {{- if not (index .Values.apps.jupyterhub.hub.service.annotations "prometheus.io/scrape") }}
+ prometheus.io/scrape: "true"
+ {{- end }}
+ {{- if not (index .Values.apps.jupyterhub.hub.service.annotations "prometheus.io/path") }}
+ prometheus.io/path: {{ .Values.apps.jupyterhub.hub.baseUrl }}hub/metrics
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.hub.service.annotations }}
+ {{- .Values.apps.jupyterhub.hub.service.annotations | toYaml | nindent 4 }}
+ {{- end }}
+spec:
+ type: {{ .Values.apps.jupyterhub.hub.service.type }}
+ {{- if .Values.apps.jupyterhub.hub.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.apps.jupyterhub.hub.service.loadBalancerIP }}
+ {{- end }}
+ selector:
+ {{- include "jupyterhub.matchLabels" . | nindent 4 }}
+ ports:
+ - protocol: TCP
+ port: 8081
+ targetPort: 8081
+ {{- if .Values.apps.jupyterhub.hub.service.ports.nodePort }}
+ nodePort: {{ .Values.apps.jupyterhub.hub.service.ports.nodePort }}
+ {{- end }}
diff --git a/applications/jupyterhub/deploy/templates/image-puller/_daemonset-helper.yaml b/applications/jupyterhub/deploy/templates/image-puller/_daemonset-helper.yaml
new file mode 100755
index 00000000..8bb932d8
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/image-puller/_daemonset-helper.yaml
@@ -0,0 +1,119 @@
+{{- /*
+Returns an image-puller daemonset. Two daemonsets will be created like this.
+- hook-image-puller: for pre helm upgrade image pulling (lives temporarily)
+- continuous-image-puller: for newly added nodes image pulling
+*/}}
+{{- define "jupyterhub.imagePuller.daemonset" -}}
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ print .componentPrefix "image-puller" }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ {{- if .hook }}
+ hub.jupyter.org/deletable: "true"
+ {{- end }}
+ {{- if .hook }}
+ annotations:
+ {{- /*
+ Allows the daemonset to be deleted when the image-awaiter job is completed.
+ */}}
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ "helm.sh/hook-weight": "-10"
+ {{- end }}
+spec:
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 100%
+ template:
+ metadata:
+ labels:
+ {{- /* Changes here will cause the DaemonSet to restart the pods. */}}
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ spec:
+ tolerations:
+ {{- include "jupyterhub.userTolerations" . | nindent 8 }}
+ nodeSelector: {{ toJson .Values.apps.jupyterhub.singleuser.nodeSelector }}
+ {{- if include "jupyterhub.userNodeAffinityRequired" . }}
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ {{- include "jupyterhub.userNodeAffinityRequired" . | nindent 14 }}
+ {{- end }}
+ terminationGracePeriodSeconds: 0
+ automountServiceAccountToken: false
+ {{- if or .Values.apps.jupyterhub.singleuser.imagePullSecret.enabled .Values.apps.jupyterhub.singleuser.image.pullSecrets }}
+ imagePullSecrets:
+ {{- if .Values.apps.jupyterhub.singleuser.imagePullSecret.enabled }}
+ - name: {{ if .hook -}} hook- {{- end -}} singleuser-image-credentials
+ {{ else }}
+ {{- range .Values.apps.jupyterhub.singleuser.image.pullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{ end }}
+ {{- end }}
+ initContainers:
+ - name: image-pull-singleuser
+ image: {{ .Values.apps.jupyterhub.singleuser.image.name }}:{{ .Values.apps.jupyterhub.singleuser.image.tag }}
+ {{- with .Values.apps.jupyterhub.singleuser.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ command:
+ - /bin/sh
+ - -c
+ - echo "Pulling complete"
+ {{- range $k, $container := .Values.apps.jupyterhub.singleuser.profileList }}
+ {{- if $container.kubespawner_override }}
+ {{- if $container.kubespawner_override.image }}
+ - name: image-pull-singleuser-profilelist-{{ $k }}
+ image: {{ $container.kubespawner_override.image }}
+ command:
+ - /bin/sh
+ - -c
+ - echo "Pulling complete"
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if not .Values.apps.jupyterhub.singleuser.cloudMetadata.enabled }}
+ - name: image-pull-metadata-block
+ image: {{ .Values.apps.jupyterhub.singleuser.networkTools.image.name }}:{{ .Values.apps.jupyterhub.singleuser.networkTools.image.tag }}
+ {{- with .Values.apps.jupyterhub.singleuser.networkTools.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ command:
+ - /bin/sh
+ - -c
+ - echo "Pulling complete"
+ {{- end }}
+ {{- range $k, $v := .Values.apps.jupyterhub.prePuller.extraImages }}
+ - name: image-pull-{{ $k }}
+ image: {{ $v.name }}:{{ $v.tag }}
+ {{- with $v.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ command:
+ - /bin/sh
+ - -c
+ - echo "Pulling complete"
+ {{- end }}
+ {{- range $k, $container := .Values.apps.jupyterhub.singleuser.extraContainers }}
+ - name: image-pull-singleuser-extra-container-{{ $k }}
+ image: {{ $container.image }}
+ {{- with $container.imagePullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ command:
+ - /bin/sh
+ - -c
+ - echo "Pulling complete"
+ {{- end }}
+ containers:
+ - name: pause
+ image: {{ .Values.apps.jupyterhub.prePuller.pause.image.name }}:{{ .Values.apps.jupyterhub.prePuller.pause.image.tag }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/image-puller/daemonset.yaml b/applications/jupyterhub/deploy/templates/image-puller/daemonset.yaml
new file mode 100755
index 00000000..d7411f76
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/image-puller/daemonset.yaml
@@ -0,0 +1,20 @@
+{{- /*
+The hook-image-puller daemonset will be created with the highest priority during
+helm upgrades. Its task is to pull the required images on all nodes. When the
+image-awaiter job confirms that the required images have been pulled, the daemonset is
+deleted. Only then will the actual helm upgrade start.
+*/}}
+{{- if .Values.apps.jupyterhub.prePuller.hook.enabled }}
+{{- $_ := merge (dict "hook" true "componentPrefix" "hook-") . }}
+{{- include "jupyterhub.imagePuller.daemonset" $_ }}
+{{- end }}
+---
+{{- /*
+The continuous-image-puller daemonset's task is to pull required images to nodes
+that are added in between helm upgrades, for example by manually adding a node
+or by the cluster autoscaler.
+*/}}
+{{- if .Values.apps.jupyterhub.prePuller.continuous.enabled }}
+{{- $_ := merge (dict "hook" false "componentPrefix" "continuous-") . }}
+{{ include "jupyterhub.imagePuller.daemonset" $_ }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/image-puller/job.yaml b/applications/jupyterhub/deploy/templates/image-puller/job.yaml
new file mode 100755
index 00000000..c35fdf22
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/image-puller/job.yaml
@@ -0,0 +1,42 @@
+{{- /*
+This job has a part to play in the helm upgrade process. It simply waits for the
+hook-image-puller daemonset, which is started slightly before this job, to get
+its pods running. If all those pods are running, they must have pulled all the
+required images on all nodes, as the images are used in init containers with a
+dummy command.
+*/}}
+{{- if .Values.apps.jupyterhub.prePuller.hook.enabled -}}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: hook-image-awaiter
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ hub.jupyter.org/deletable: "true"
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ "helm.sh/hook-weight": "10"
+spec:
+ template:
+ metadata:
+ labels:
+ {{- /* Changes here will cause the Job to restart the pods. */}}
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ spec:
+ restartPolicy: Never
+ {{- if .Values.apps.jupyterhub.rbac.enabled }}
+ serviceAccountName: hook-image-awaiter
+ {{- end }}
+ containers:
+ - image: {{ .Values.apps.jupyterhub.prePuller.hook.image.name }}:{{ .Values.apps.jupyterhub.prePuller.hook.image.tag }}
+ name: hook-image-awaiter
+ imagePullPolicy: IfNotPresent
+ command:
+ - /image-awaiter
+ - -ca-path=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ - -auth-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -api-server-address=https://$(KUBERNETES_SERVICE_HOST):$(KUBERNETES_SERVICE_PORT)
+ - -namespace={{ .Release.Namespace }}
+ - -daemonset=hook-image-puller
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml b/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
new file mode 100755
index 00000000..1baffdaa
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/image-puller/rbac.yaml
@@ -0,0 +1,63 @@
+{{- /*
+Permissions to be used by the hook-image-awaiter job
+*/}}
+{{- if .Values.apps.jupyterhub.prePuller.hook.enabled }}
+{{- if .Values.apps.jupyterhub.rbac.enabled }}
+{{- /*
+This service account...
+*/ -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: hook-image-awaiter
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ hub.jupyter.org/deletable: "true"
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ "helm.sh/hook-weight": "0"
+---
+{{- /*
+... will be used by this role...
+*/}}
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: hook-image-awaiter
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ hub.jupyter.org/deletable: "true"
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ "helm.sh/hook-weight": "0"
+rules:
+ - apiGroups: ["apps"] # "" indicates the core API group
+ resources: ["daemonsets"]
+ verbs: ["get"]
+---
+{{- /*
+... as declared by this binding.
+*/}}
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: hook-image-awaiter
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ hub.jupyter.org/deletable: "true"
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ "helm.sh/hook-weight": "0"
+subjects:
+ - kind: ServiceAccount
+ name: hook-image-awaiter
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: Role
+ name: hook-image-awaiter
+ apiGroup: rbac.authorization.k8s.io
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt b/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt
new file mode 100755
index 00000000..08bd7bba
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/proxy/autohttps/_README.txt
@@ -0,0 +1,9 @@
+# Automatic HTTPS Terminator
+
+This directory has Kubernetes objects for automatic Let's Encrypt support.
+When enabled, we create a new deployment object that runs a Traefik proxy
+and a secret-sync container. These are responsible for requesting,
+storing and renewing certificates as needed from Let's Encrypt.
+
+The only change required outside of this directory is in the `proxy-public`
+service, which targets different hubs based on automatic HTTPS status.
\ No newline at end of file
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml
new file mode 100755
index 00000000..0f5a0da6
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/proxy/autohttps/configmap.yaml
@@ -0,0 +1,134 @@
+{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts (not (not .Values.tls))) }}
+{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
+{{- if $autoHTTPS -}}
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: traefik-proxy-config
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+data:
+ # This configmap provides the complete configuration for our traefik proxy.
+ # traefik has 'static' config and 'dynamic' config (https://docs.traefik.io/getting-started/configuration-overview/)
+ # traefik.toml contains the static config, while dynamic.toml has the dynamic config.
+ traefik.toml: |
+
+    # We want to listen on both ports 80 and 443
+ [entryPoints]
+ [entryPoints.http]
+ # traefik is used only for TLS termination, so port 80 is just for redirects
+ # No configuration for timeouts, etc needed
+ address = ":80"
+
+ [entryPoints.https]
+ address = ":443"
+
+ [entryPoints.https.transport.respondingTimeouts]
+ # High idle timeout, because we have websockets
+ idleTimeout = "10m0s"
+
+ [log]
+ level = "INFO"
+
+ [accessLog]
+ [accessLog.filters]
+ # Only error codes
+ statusCodes = ["500-599"]
+
+ # Redact possible sensitive headers from log
+ [accessLog.fields.headers]
+ [accessLog.fields.headers.names]
+ Authorization = "redact"
+ Cookie = "redact"
+ Set-Cookie = "redact"
+ X-Xsrftoken = "redact"
+
+ # We want certificates to come from Let's Encrypt, with the HTTP-01 challenge
+ [certificatesResolvers.le.acme]
+ email = {{ required "proxy.https.letsencrypt.contactEmail is a required field" .Values.apps.jupyterhub.proxy.https.letsencrypt.contactEmail | quote }}
+ storage = "/etc/acme/acme.json"
+ {{- if .Values.apps.jupyterhub.proxy.https.letsencrypt.acmeServer }}
+ caServer = {{ .Values.apps.jupyterhub.proxy.https.letsencrypt.acmeServer | quote }}
+ {{- end}}
+ [certificatesResolvers.le.acme.httpChallenge]
+ # Use our port 80 http endpoint for the HTTP-01 challenge
+ entryPoint = "http"
+
+ [providers]
+ [providers.file]
+ # Configuration for routers & other dynamic items come from this file
+ # This file is also provided by this configmap
+ filename = '/etc/traefik/dynamic.toml'
+
+ dynamic.toml: |
+ # Configure TLS to give us an A+ in the ssllabs test
+ [tls]
+ [tls.options]
+ [tls.options.default]
+ sniStrict = true
+        # Do not support insecure TLS 1.0 or 1.1
+ minVersion = "VersionTLS12"
+ # Ciphers to support, adapted from https://ssl-config.mozilla.org/#server=traefik&server-version=1.7.12&config=intermediate
+ # This supports a reasonable number of browsers.
+ cipherSuites = [
+ "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+ "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+ "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
+ "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
+ ]
+
+ # traefik uses middlewares to set options in specific routes
+ # These set up the middleware options, which are then referred to by the routes
+ [http.middlewares]
+ # Used to do http -> https redirects
+ [http.middlewares.redirect.redirectScheme]
+ scheme = "https"
+
+ # Used to set appropriate headers in requests sent to CHP
+ [http.middlewares.https.headers]
+ [http.middlewares.https.headers.customRequestHeaders]
+ # Tornado needs this for referrer checks
+ # You run into stuff like https://github.com/jupyterhub/jupyterhub/issues/2284 otherwise
+ X-Scheme = "https"
+
+ # Used to set HSTS headers based on user provided options
+ [http.middlewares.hsts.headers]
+ stsSeconds = {{ int64 .Values.apps.jupyterhub.proxy.traefik.hsts.maxAge }}
+ {{ if .Values.apps.jupyterhub.proxy.traefik.hsts.includeSubdomains }}
+ stsIncludeSubdomains = true
+ {{- end }}
+
+
+ # Routers match conditions (rule) to options (middlewares) and a backend (service)
+ [http.routers]
+ # Listen on the http endpoint (port 80), redirect everything to https
+ [http.routers.httpredirect]
+ rule = "PathPrefix(`/`)"
+ service = "chp"
+ entrypoints = ["http"]
+ middlewares = ["redirect"]
+
+ # Listen on https endpoint (port 443), proxy everything to chp
+ [http.routers.chp]
+ rule = "PathPrefix(`/`)"
+ entrypoints = ["https"]
+ middlewares = ["hsts", "https"]
+ service = "chp"
+
+ [http.routers.chp.tls]
+ # use our nice TLS defaults, and get HTTPS from Let's Encrypt
+ options = "default"
+ certResolver = "le"
+ {{- range $host := .Values.apps.jupyterhub.proxy.https.hosts }}
+ [[http.routers.chp.tls.domains]]
+ main = "{{ $host }}"
+ {{- end}}
+
+ # Set CHP to be our only backend where traffic is routed to
+ [http.services]
+ [http.services.chp.loadBalancer]
+ [[http.services.chp.loadBalancer.servers]]
+ url = "http://proxy-http:8000/"
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml
new file mode 100755
index 00000000..cb9aafe8
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/proxy/autohttps/deployment.yaml
@@ -0,0 +1,93 @@
+{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts (not (not .Values.tls))) }}
+{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
+{{- if $autoHTTPS -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: autohttps
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ hub.jupyter.org/network-access-proxy-http: "true"
+ annotations:
+ checksum/config-map: {{ include (print .Template.BasePath "/jupyterhub/proxy/autohttps/configmap.yaml") . | sha256sum }}
+ spec:
+ {{- if .Values.apps.jupyterhub.rbac.enabled }}
+ serviceAccountName: autohttps
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+ priorityClassName: {{ .Release.Name }}-default-priority
+ {{- end }}
+ nodeSelector: {{ toJson .Values.apps.jupyterhub.proxy.nodeSelector }}
+ {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
+ volumes:
+ - name: certificates
+ emptyDir: {}
+ - name: traefik-config
+ configMap:
+ name: traefik-proxy-config
+ initContainers:
+ - name: load-acme
+ image: "{{ .Values.apps.jupyterhub.proxy.secretSync.image.name }}:{{ .Values.apps.jupyterhub.proxy.secretSync.image.tag }}"
+ {{- with .Values.apps.jupyterhub.proxy.secretSync.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ command: ["/usr/local/bin/secret-sync.py", "load", "proxy-public-tls-acme", "acme.json", "/etc/acme/acme.json"]
+ env:
+ # We need this to get logs immediately
+ - name: PYTHONUNBUFFERED
+ value: "True"
+ volumeMounts:
+ - name: certificates
+ mountPath: /etc/acme
+ containers:
+ - name: traefik
+ image: "{{ .Values.apps.jupyterhub.proxy.traefik.image.name }}:{{ .Values.apps.jupyterhub.proxy.traefik.image.tag }}"
+ {{- with .Values.apps.jupyterhub.proxy.traefik.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ resources:
+ {{- .Values.apps.jupyterhub.proxy.traefik.resources | toYaml | trimSuffix "\n" | nindent 12 }}
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ - name: https
+ containerPort: 443
+ protocol: TCP
+ volumeMounts:
+ - name: traefik-config
+ mountPath: /etc/traefik
+ - name: certificates
+ mountPath: /etc/acme
+ - name: secret-sync
+ image: "{{ .Values.apps.jupyterhub.proxy.secretSync.image.name }}:{{ .Values.apps.jupyterhub.proxy.secretSync.image.tag }}"
+ {{- with .Values.apps.jupyterhub.proxy.secretSync.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ command:
+ - /usr/local/bin/secret-sync.py
+ - watch-save
+ - --label=app={{ include "jupyterhub.appLabel" . }}
+ - --label=release={{ .Release.Name }}
+ - --label=chart={{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ - --label=heritage=secret-sync
+ - proxy-public-tls-acme
+ - acme.json
+ - /etc/acme/acme.json
+ env:
+ # We need this to get logs immediately
+ - name: PYTHONUNBUFFERED
+ value: "True"
+ volumeMounts:
+ - name: certificates
+ mountPath: /etc/acme
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml
new file mode 100755
index 00000000..85fdba66
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/proxy/autohttps/rbac.yaml
@@ -0,0 +1,36 @@
+{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts (not (not .Values.tls))) }}
+{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
+{{- if (and $autoHTTPS .Values.apps.jupyterhub.rbac.enabled) -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: autohttps
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+rules:
+- apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "patch", "list", "create"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: autohttps
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+subjects:
+- kind: ServiceAccount
+ name: autohttps
+ apiGroup:
+roleRef:
+ kind: Role
+ name: autohttps
+ apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: autohttps
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml b/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml
new file mode 100755
index 00000000..f7eee558
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/proxy/autohttps/service.yaml
@@ -0,0 +1,23 @@
+{{- $HTTPS := (and .Values.apps.jupyterhub.proxy.https.hosts (not (not .Values.tls))) }}
+{{- $autoHTTPS := (and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt")) }}
+{{- if $autoHTTPS -}}
+apiVersion: v1
+kind: Service
+metadata:
+ name: proxy-http
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ {{- range $key, $value := .Values.apps.jupyterhub.proxy.service.labels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ annotations: {{ toJson .Values.apps.jupyterhub.proxy.service.annotations }}
+spec:
+ type: ClusterIP
+ selector:
+ {{- $_ := merge (dict "componentLabel" "proxy") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 4 }}
+ ports:
+ - protocol: TCP
+ port: 8000
+ targetPort: 8000
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/deployment.yaml b/applications/jupyterhub/deploy/templates/proxy/deployment.yaml
new file mode 100755
index 00000000..bed74166
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/proxy/deployment.yaml
@@ -0,0 +1,139 @@
+{{- $manualHTTPS := and (not (not .Values.tls)) (eq .Values.apps.jupyterhub.proxy.https.type "manual") -}}
+{{- $manualHTTPSwithsecret := and (not (not .Values.tls)) (eq .Values.apps.jupyterhub.proxy.https.type "secret") -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: proxy
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ strategy:
+ {{- .Values.apps.jupyterhub.proxy.deploymentStrategy | toYaml | trimSuffix "\n" | nindent 4 }}
+ template:
+ metadata:
+ labels:
+ {{- /* Changes here will cause the Deployment to restart the pods. */}}
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ hub.jupyter.org/network-access-hub: "true"
+ hub.jupyter.org/network-access-singleuser: "true"
+ {{- if .Values.apps.jupyterhub.proxy.labels }}
+ {{- .Values.apps.jupyterhub.proxy.labels | toYaml | trimSuffix "\n" | nindent 8 }}
+ {{- end }}
+ annotations:
+ # This lets us autorestart when the secret changes!
+ checksum/hub-secret: {{ include (print $.Template.BasePath "/jupyterhub/hub/secret.yaml") . | sha256sum }}
+ checksum/proxy-secret: {{ include (print $.Template.BasePath "/jupyterhub/proxy/secret.yaml") . | sha256sum }}
+ {{- if .Values.apps.jupyterhub.proxy.annotations }}
+ {{- .Values.apps.jupyterhub.proxy.annotations | toYaml | trimSuffix "\n" | nindent 8 }}
+ {{- end }}
+ spec:
+ terminationGracePeriodSeconds: 60
+ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+ priorityClassName: {{ .Release.Name }}-default-priority
+ {{- end }}
+ nodeSelector: {{ toJson .Values.apps.jupyterhub.proxy.nodeSelector }}
+ {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
+ {{- if $manualHTTPS }}
+ volumes:
+ - name: tls-secret
+ secret:
+ secretName: proxy-manual-tls
+ {{- else if $manualHTTPSwithsecret }}
+ volumes:
+ - name: tls-secret
+ secret:
+ secretName: {{ .Values.apps.jupyterhub.proxy.https.secret.name }}
+ {{- end }}
+ containers:
+ - name: chp
+ image: {{ .Values.apps.jupyterhub.proxy.chp.image.name }}:{{ .Values.apps.jupyterhub.proxy.chp.image.tag }}
+ command:
+ - configurable-http-proxy
+ - --ip=0.0.0.0
+ - --api-ip=0.0.0.0
+ - --api-port=8001
+ - --default-target=http://$(HUB_SERVICE_HOST):$(HUB_SERVICE_PORT)
+ - --error-target=http://$(HUB_SERVICE_HOST):$(HUB_SERVICE_PORT)/hub/error
+ {{- if $manualHTTPS }}
+ - --port=8443
+ - --redirect-port=8000
+ - --redirect-to=443
+ - --ssl-key=/etc/chp/tls/tls.key
+ - --ssl-cert=/etc/chp/tls/tls.crt
+ {{- else if $manualHTTPSwithsecret }}
+ - --port=8443
+ - --redirect-port=8000
+ - --redirect-to=443
+ - --ssl-key=/etc/chp/tls/{{ .Values.apps.jupyterhub.proxy.https.secret.key }}
+ - --ssl-cert=/etc/chp/tls/{{ .Values.apps.jupyterhub.proxy.https.secret.crt }}
+ {{- else }}
+ - --port=8000
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.debug.enabled }}
+ - --log-level=debug
+ {{- end }}
+ {{- if or $manualHTTPS $manualHTTPSwithsecret }}
+ volumeMounts:
+ - name: tls-secret
+ mountPath: /etc/chp/tls
+ readOnly: true
+ {{- end }}
+ resources:
+ {{- .Values.apps.jupyterhub.proxy.chp.resources | toYaml | trimSuffix "\n" | nindent 12 }}
+ securityContext:
+ # Don't allow any process to execute as root inside the container
+ allowPrivilegeEscalation: false
+ env:
+ - name: CONFIGPROXY_AUTH_TOKEN
+ valueFrom:
+ secretKeyRef:
+ {{- if .Values.apps.jupyterhub.hub.existingSecret }}
+ name: {{ .Values.apps.jupyterhub.hub.existingSecret }}
+ {{- else }}
+ name: hub-secret
+ {{- end }}
+ key: proxy.token
+ {{- with .Values.apps.jupyterhub.proxy.chp.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ ports:
+ {{- if or $manualHTTPS $manualHTTPSwithsecret }}
+ - containerPort: 8443
+ name: proxy-https
+ {{- end }}
+ - containerPort: 8000
+ name: proxy-public
+ - containerPort: 8001
+ name: api
+ {{- if .Values.apps.jupyterhub.proxy.chp.livenessProbe.enabled }}
+ livenessProbe:
+ initialDelaySeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.apps.jupyterhub.proxy.chp.livenessProbe.periodSeconds }}
+ httpGet:
+ path: /_chp_healthz
+ {{- if or $manualHTTPS $manualHTTPSwithsecret }}
+ port: proxy-https
+ scheme: HTTPS
+ {{- else }}
+ port: proxy-public
+ scheme: HTTP
+ {{- end }}
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.proxy.chp.readinessProbe.enabled }}
+ readinessProbe:
+ initialDelaySeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.apps.jupyterhub.proxy.chp.readinessProbe.periodSeconds }}
+ httpGet:
+ path: /_chp_healthz
+ {{- if or $manualHTTPS $manualHTTPSwithsecret }}
+ port: proxy-https
+ scheme: HTTPS
+ {{- else }}
+ port: proxy-public
+ scheme: HTTP
+ {{- end }}
+ {{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/netpol.yaml b/applications/jupyterhub/deploy/templates/proxy/netpol.yaml
new file mode 100755
index 00000000..b0533633
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/proxy/netpol.yaml
@@ -0,0 +1,68 @@
+{{- $HTTPS := (not (not .Values.tls)) -}}
+{{- $autoHTTPS := and $HTTPS (and (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt") .Values.apps.jupyterhub.proxy.https.hosts) -}}
+{{- $manualHTTPS := and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "manual") -}}
+{{- $manualHTTPSwithsecret := and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "secret") -}}
+{{- if .Values.apps.jupyterhub.proxy.networkPolicy.enabled -}}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: proxy
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ podSelector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ policyTypes:
+ - Ingress
+ - Egress
+ ingress:
+ - ports:
+ - protocol: TCP
+ port: 80
+ - protocol: TCP
+ port: 443
+ {{- if not $autoHTTPS }}
+ - protocol: TCP
+ port: 8000
+ {{- end }}
+ {{- if or $manualHTTPS $manualHTTPSwithsecret}}
+ - protocol: TCP
+ port: 8443
+ {{- end }}
+ # kube-lego /healthz
+ - protocol: TCP
+ port: 8080
+ # nginx /healthz
+ - protocol: TCP
+ port: 10254
+ - from:
+ - podSelector:
+ matchLabels:
+ hub.jupyter.org/network-access-proxy-http: "true"
+ ports:
+ - protocol: TCP
+ port: 8000
+ - from:
+ - podSelector:
+ matchLabels:
+ hub.jupyter.org/network-access-proxy-api: "true"
+ ports:
+ - protocol: TCP
+ port: 8001
+ {{- /* Useful if you want to give proxy access to pods from other namespaces */}}
+ {{- if .Values.apps.jupyterhub.proxy.networkPolicy.ingress}}
+ {{- .Values.apps.jupyterhub.proxy.networkPolicy.ingress | toYaml | trimSuffix "\n" | nindent 4 }}
+ {{- end }}
+ egress:
+ {{- /*
+  The default is to allow all egress for the proxy. If you want to restrict it,
+  the following egress is required:
+ - hub:8081
+ - singleuser:8888
+ - Kubernetes api-server
+ */}}
+ {{- if .Values.apps.jupyterhub.proxy.networkPolicy.egress }}
+ {{- .Values.apps.jupyterhub.proxy.networkPolicy.egress | toYaml | trimSuffix "\n" | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/pdb.yaml b/applications/jupyterhub/deploy/templates/proxy/pdb.yaml
new file mode 100755
index 00000000..7ca291ff
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/proxy/pdb.yaml
@@ -0,0 +1,13 @@
+{{- if .Values.apps.jupyterhub.proxy.pdb.enabled -}}
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+ name: proxy
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ minAvailable: {{ .Values.apps.jupyterhub.proxy.pdb.minAvailable }}
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/secret.yaml b/applications/jupyterhub/deploy/templates/proxy/secret.yaml
new file mode 100755
index 00000000..255d159d
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/proxy/secret.yaml
@@ -0,0 +1,13 @@
+{{- $manualHTTPS := and (not (not .Values.tls)) (eq .Values.apps.jupyterhub.proxy.https.type "manual") -}}
+{{- if $manualHTTPS -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: proxy-manual-tls
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ .Values.apps.jupyterhub.proxy.https.manual.cert | b64enc }}
+ tls.key: {{ .Values.apps.jupyterhub.proxy.https.manual.key | b64enc }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/proxy/service.yaml b/applications/jupyterhub/deploy/templates/proxy/service.yaml
new file mode 100755
index 00000000..c55c408f
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/proxy/service.yaml
@@ -0,0 +1,79 @@
+{{- $HTTPS := (not (not .Values.tls)) -}}
+{{- $autoHTTPS := and $HTTPS (and (eq .Values.apps.jupyterhub.proxy.https.type "letsencrypt") .Values.apps.jupyterhub.proxy.https.hosts) -}}
+{{- $offloadHTTPS := and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "offload") -}}
+{{- $manualHTTPS := and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "manual") -}}
+{{- $manualHTTPSwithsecret := and $HTTPS (eq .Values.apps.jupyterhub.proxy.https.type "secret") -}}
+apiVersion: v1
+kind: Service
+metadata:
+ name: proxy-api
+ labels:
+ {{- $_ := merge (dict "componentSuffix" "-api") . }}
+ {{- include "jupyterhub.labels" $_ | nindent 4 }}
+spec:
+ selector:
+ {{- include "jupyterhub.matchLabels" . | nindent 4 }}
+ ports:
+ - protocol: TCP
+ port: 8001
+ targetPort: 8001
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: proxy-public
+ labels:
+ {{- $_ := merge (dict "componentSuffix" "-public") . }}
+ {{- include "jupyterhub.labels" $_ | nindent 4 }}
+ {{- if .Values.apps.jupyterhub.proxy.service.labels }}
+ {{- .Values.apps.jupyterhub.proxy.service.labels | toYaml | trimSuffix "\n" | nindent 4 }}
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.proxy.service.annotations }}
+ annotations:
+ {{- .Values.apps.jupyterhub.proxy.service.annotations | toYaml | trimSuffix "\n" | nindent 4 }}
+ {{- end }}
+spec:
+ selector:
+ # TODO: Refactor to utilize the helpers
+ {{- if $autoHTTPS }}
+ component: autohttps
+ {{- else }}
+ component: proxy
+ {{- end }}
+ release: {{ .Release.Name }}
+ ports:
+ {{- if $HTTPS }}
+ - name: https
+ port: 443
+ protocol: TCP
+ {{- if or $manualHTTPS $manualHTTPSwithsecret }}
+ targetPort: 8443
+ {{- else if $offloadHTTPS }}
+ targetPort: 8000
+ {{- else }}
+ targetPort: 443
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.proxy.service.nodePorts.https }}
+ nodePort: {{ .Values.apps.jupyterhub.proxy.service.nodePorts.https }}
+ {{- end }}
+ {{- end }}
+ - name: http
+ port: 80
+ protocol: TCP
+ {{- if $autoHTTPS }}
+ targetPort: 80
+ {{- else }}
+ targetPort: 8000
+ {{- end }}
+ # allow proxy.service.nodePort for http
+ {{- if .Values.apps.jupyterhub.proxy.service.nodePorts.http }}
+ nodePort: {{ .Values.apps.jupyterhub.proxy.service.nodePorts.http }}
+ {{- end }}
+ type: {{ .Values.apps.jupyterhub.proxy.service.type }}
+ {{- if .Values.apps.jupyterhub.proxy.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.apps.jupyterhub.proxy.service.loadBalancerIP }}
+ {{- end }}
+ {{- if and (eq .Values.apps.jupyterhub.proxy.service.type "LoadBalancer") .Values.apps.jupyterhub.proxy.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges:
+ {{- .Values.apps.jupyterhub.proxy.service.loadBalancerSourceRanges | toYaml | trimSuffix "\n" | nindent 4 }}
+ {{- end }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/_scheduling-helpers.tpl b/applications/jupyterhub/deploy/templates/scheduling/_scheduling-helpers.tpl
new file mode 100755
index 00000000..5f40c42b
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/scheduling/_scheduling-helpers.tpl
@@ -0,0 +1,158 @@
+{{- /*
+ jupyterhub.userTolerations
+ Lists the tolerations for node taints that the user pods should have
+*/}}
+{{- define "jupyterhub.userTolerations" -}}
+- key: hub.jupyter.org_dedicated
+ operator: Equal
+ value: user
+ effect: NoSchedule
+- key: hub.jupyter.org/dedicated
+ operator: Equal
+ value: user
+ effect: NoSchedule
+{{- if .Values.apps.jupyterhub.singleuser.extraTolerations }}
+{{- .Values.apps.jupyterhub.singleuser.extraTolerations | toYaml | trimSuffix "\n" | nindent 0 }}
+{{- end }}
+{{- end }}
+
+
+
+{{- define "jupyterhub.userNodeAffinityRequired" -}}
+{{- if eq .Values.apps.jupyterhub.scheduling.userPods.nodeAffinity.matchNodePurpose "require" -}}
+- matchExpressions:
+ - key: hub.jupyter.org/node-purpose
+ operator: In
+ values: [user]
+{{- end }}
+{{- if .Values.apps.jupyterhub.singleuser.extraNodeAffinity.required }}
+{{- .Values.apps.jupyterhub.singleuser.extraNodeAffinity.required | toYaml | trimSuffix "\n" | nindent 0 }}
+{{- end }}
+{{- end }}
+
+{{- define "jupyterhub.userNodeAffinityPreferred" -}}
+{{- if eq .Values.apps.jupyterhub.scheduling.userPods.nodeAffinity.matchNodePurpose "prefer" -}}
+- weight: 100
+ preference:
+ matchExpressions:
+ - key: hub.jupyter.org/node-purpose
+ operator: In
+ values: [user]
+{{- end }}
+{{- if .Values.apps.jupyterhub.singleuser.extraNodeAffinity.preferred }}
+{{- .Values.apps.jupyterhub.singleuser.extraNodeAffinity.preferred | toYaml | trimSuffix "\n" | nindent 0 }}
+{{- end }}
+{{- end }}
+
+{{- define "jupyterhub.userPodAffinityRequired" -}}
+{{- if .Values.apps.jupyterhub.singleuser.extraPodAffinity.required -}}
+{{ .Values.apps.jupyterhub.singleuser.extraPodAffinity.required | toYaml | trimSuffix "\n" }}
+{{- end }}
+{{- end }}
+
+{{- define "jupyterhub.userPodAffinityPreferred" -}}
+{{- if .Values.apps.jupyterhub.singleuser.extraPodAffinity.preferred -}}
+{{ .Values.apps.jupyterhub.singleuser.extraPodAffinity.preferred | toYaml | trimSuffix "\n" }}
+{{- end }}
+{{- end }}
+
+{{- define "jupyterhub.userPodAntiAffinityRequired" -}}
+{{- if .Values.apps.jupyterhub.singleuser.extraPodAntiAffinity.required -}}
+{{ .Values.apps.jupyterhub.singleuser.extraPodAntiAffinity.required | toYaml | trimSuffix "\n" }}
+{{- end }}
+{{- end }}
+
+{{- define "jupyterhub.userPodAntiAffinityPreferred" -}}
+{{- if .Values.apps.jupyterhub.singleuser.extraPodAntiAffinity.preferred -}}
+{{ .Values.apps.jupyterhub.singleuser.extraPodAntiAffinity.preferred | toYaml | trimSuffix "\n" }}
+{{- end }}
+{{- end }}
+
+
+
+{{- /*
+ jupyterhub.userAffinity:
+  It is used by user-placeholder to set the same affinity on the placeholder
+  pods as on the user pods spawned by kubespawner.
+*/}}
+{{- define "jupyterhub.userAffinity" -}}
+
+{{- $dummy := set . "nodeAffinityRequired" (include "jupyterhub.userNodeAffinityRequired" .) -}}
+{{- $dummy := set . "podAffinityRequired" (include "jupyterhub.userPodAffinityRequired" .) -}}
+{{- $dummy := set . "podAntiAffinityRequired" (include "jupyterhub.userPodAntiAffinityRequired" .) -}}
+{{- $dummy := set . "nodeAffinityPreferred" (include "jupyterhub.userNodeAffinityPreferred" .) -}}
+{{- $dummy := set . "podAffinityPreferred" (include "jupyterhub.userPodAffinityPreferred" .) -}}
+{{- $dummy := set . "podAntiAffinityPreferred" (include "jupyterhub.userPodAntiAffinityPreferred" .) -}}
+{{- $dummy := set . "hasNodeAffinity" (or .nodeAffinityRequired .nodeAffinityPreferred) -}}
+{{- $dummy := set . "hasPodAffinity" (or .podAffinityRequired .podAffinityPreferred) -}}
+{{- $dummy := set . "hasPodAntiAffinity" (or .podAntiAffinityRequired .podAntiAffinityPreferred) -}}
+
+{{- if .hasNodeAffinity -}}
+nodeAffinity:
+ {{- if .nodeAffinityRequired }}
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ {{- .nodeAffinityRequired | nindent 6 }}
+ {{- end }}
+
+ {{- if .nodeAffinityPreferred }}
+ preferredDuringSchedulingIgnoredDuringExecution:
+ {{- .nodeAffinityPreferred | nindent 4 }}
+ {{- end }}
+{{- end }}
+
+{{- if .hasPodAffinity }}
+podAffinity:
+ {{- if .podAffinityRequired }}
+ requiredDuringSchedulingIgnoredDuringExecution:
+ {{- .podAffinityRequired | nindent 4 }}
+ {{- end }}
+
+ {{- if .podAffinityPreferred }}
+ preferredDuringSchedulingIgnoredDuringExecution:
+ {{- .podAffinityPreferred | nindent 4 }}
+ {{- end }}
+{{- end }}
+
+{{- if .hasPodAntiAffinity }}
+podAntiAffinity:
+ {{- if .podAntiAffinityRequired }}
+ requiredDuringSchedulingIgnoredDuringExecution:
+ {{- .podAntiAffinityRequired | nindent 4 }}
+ {{- end }}
+
+ {{- if .podAntiAffinityPreferred }}
+ preferredDuringSchedulingIgnoredDuringExecution:
+ {{- .podAntiAffinityPreferred | nindent 4 }}
+ {{- end }}
+{{- end }}
+
+{{- end }}
+
+
+
+{{- define "jupyterhub.coreAffinity" -}}
+{{- $require := eq .Values.apps.jupyterhub.scheduling.corePods.nodeAffinity.matchNodePurpose "require" -}}
+{{- $prefer := eq .Values.apps.jupyterhub.scheduling.corePods.nodeAffinity.matchNodePurpose "prefer" -}}
+{{- if or $require $prefer -}}
+affinity:
+ nodeAffinity:
+ {{- if $require }}
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: hub.jupyter.org/node-purpose
+ operator: In
+ values: [core]
+ {{- end }}
+ {{- if $prefer }}
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: hub.jupyter.org/node-purpose
+ operator: In
+ values: [core]
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml b/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
new file mode 100755
index 00000000..ac4eb4f2
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/scheduling/priorityclass.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+apiVersion: scheduling.k8s.io/v1beta1
+kind: PriorityClass
+metadata:
+ name: {{ .Release.Name }}-default-priority
+ labels:
+ {{- $_ := merge (dict "componentLabel" "default-priority") . }}
+ {{- include "jupyterhub.labels" $_ | nindent 4 }}
+ annotations:
+ # PriorityClasses must be added before the other resources reference them.
+ helm.sh/hook: pre-install,pre-upgrade
+ helm.sh/hook-delete-policy: before-hook-creation
+ helm.sh/hook-weight: "-100"
+value: {{ .Values.apps.jupyterhub.scheduling.podPriority.defaultPriority }}
+globalDefault: {{ .Values.apps.jupyterhub.scheduling.podPriority.globalDefault }}
+description: "A default priority higher than the user placeholder pods' priority."
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
new file mode 100755
index 00000000..a7ba6e58
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/pdb.yaml
@@ -0,0 +1,17 @@
+{{- /*
+The cluster autoscaler should be allowed to evict and reschedule these pods if
+doing so would help scale down a node.
+*/}}
+{{- if .Values.apps.jupyterhub.scheduling.userPlaceholder.enabled -}}
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+ name: user-placeholder
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ minAvailable: 0
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
new file mode 100755
index 00000000..5752f807
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/priorityclass.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+{{- if .Values.apps.jupyterhub.scheduling.userPlaceholder.enabled -}}
+apiVersion: scheduling.k8s.io/v1beta1
+kind: PriorityClass
+metadata:
+ name: {{ .Release.Name }}-user-placeholder-priority
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ annotations:
+ # PriorityClasses must be added before the other resources reference them.
+ helm.sh/hook: pre-install,pre-upgrade
+ helm.sh/hook-delete-policy: before-hook-creation
+ helm.sh/hook-weight: "-100"
+value: {{ .Values.apps.jupyterhub.scheduling.podPriority.userPlaceholderPriority }}
+globalDefault: false
+description: "With a priority higher than or equal to the cluster autoscaler's priority cutoff, a pod can trigger a cluster scale-up. At the same time, the placeholder pods' priority should be lower than that of other pods so they remain evictable."
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
new file mode 100755
index 00000000..ef15c466
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-placeholder/statefulset.yaml
@@ -0,0 +1,50 @@
+
+{{- /*
+These user-placeholder pods can be used to test cluster autoscaling in a
+controlled fashion.
+
+Example:
+$ echo 'Simulating four users...'
+$ kubectl scale sts/user-placeholder --replicas 4
+*/}}
+{{- if .Values.apps.jupyterhub.scheduling.userPlaceholder.enabled -}}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: user-placeholder
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ podManagementPolicy: Parallel
+ replicas: {{ .Values.apps.jupyterhub.scheduling.userPlaceholder.replicas }}
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ serviceName: "user-placeholder"
+ template:
+ metadata:
+ labels:
+      {{- /* Changes here will cause the StatefulSet to restart the pods. */}}
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ spec:
+ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+ priorityClassName: {{ .Release.Name }}-user-placeholder-priority
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled }}
+ schedulerName: {{ .Release.Name }}-user-scheduler
+ {{- end }}
+ tolerations:
+ {{- include "jupyterhub.userTolerations" . | nindent 8 }}
+ nodeSelector: {{ toJson .Values.apps.jupyterhub.singleuser.nodeSelector }}
+ {{- if include "jupyterhub.userAffinity" . }}
+ affinity:
+ {{- include "jupyterhub.userAffinity" . | nindent 8 }}
+ {{- end }}
+ terminationGracePeriodSeconds: 0
+ automountServiceAccountToken: false
+ containers:
+ - name: pause
+ image: {{ .Values.apps.jupyterhub.prePuller.pause.image.name }}:{{ .Values.apps.jupyterhub.prePuller.pause.image.tag }}
+ resources:
+ {{- include "jupyterhub.resources" . | nindent 12 }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/_helpers.tpl b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/_helpers.tpl
new file mode 100755
index 00000000..b5dca0f0
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/_helpers.tpl
@@ -0,0 +1,12 @@
+{{- /*
+Renders the kube-scheduler's image based on .Values.apps.jupyterhub.scheduling.userScheduler.image.name and
+optionally on .Values.apps.jupyterhub.scheduling.userScheduler.image.tag. The default tag is the cluster's
+Kubernetes version.
+*/}}
+{{- define "jupyterhub.scheduler.image" -}}
+{{- $name := .Values.apps.jupyterhub.scheduling.userScheduler.image.name -}}
+{{- $valuesVersion := .Values.apps.jupyterhub.scheduling.userScheduler.image.tag -}}
+{{- $clusterVersion := (split "-" .Capabilities.KubeVersion.GitVersion)._0 -}}
+{{- $tag := $valuesVersion | default $clusterVersion -}}
+{{ $name }}:{{ $tag }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
new file mode 100755
index 00000000..03769bda
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/configmap.yaml
@@ -0,0 +1,11 @@
+{{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled -}}
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: user-scheduler
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+data:
+ {{- $defaultPolicy := .Files.Get "resources/jupyterhub/userscheduler-defaultpolicy.yaml" | fromYaml }}
+ policy.cfg: {{ .Values.apps.jupyterhub.scheduling.userScheduler.policy | default $defaultPolicy | toJson | quote }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
new file mode 100755
index 00000000..8a627042
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/deployment.yaml
@@ -0,0 +1,53 @@
+{{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: user-scheduler
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.apps.jupyterhub.scheduling.userScheduler.replicas }}
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- /* Changes here will cause the Deployment to restart the pods. */}}
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ annotations:
+ # This lets us autorestart when the configmap changes!
+        checksum/config-map: {{ include (print $.Template.BasePath "/jupyterhub/scheduling/user-scheduler/configmap.yaml") . | sha256sum }}
+ spec:
+ {{- if .Values.apps.jupyterhub.rbac.enabled }}
+ serviceAccountName: user-scheduler
+ {{- end }}
+ {{- if .Values.apps.jupyterhub.scheduling.podPriority.enabled }}
+ priorityClassName: {{ .Release.Name }}-default-priority
+ {{- end }}
+ nodeSelector: {{ toJson .Values.apps.jupyterhub.scheduling.userScheduler.nodeSelector }}
+ {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
+ containers:
+ - name: user-scheduler
+ image: {{ include "jupyterhub.scheduler.image" . }}
+ command:
+ - /usr/local/bin/kube-scheduler
+ - --scheduler-name={{ .Release.Name }}-user-scheduler
+ - --policy-configmap=user-scheduler
+ - --policy-configmap-namespace={{ .Release.Namespace }}
+ - --lock-object-name=user-scheduler
+ - --lock-object-namespace={{ .Release.Namespace }}
+ - --leader-elect-resource-lock=configmaps
+ - --v={{ .Values.apps.jupyterhub.scheduling.userScheduler.logLevel | default 4 }}
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 10251
+ initialDelaySeconds: 15
+ readinessProbe:
+ httpGet:
+ path: /healthz
+ port: 10251
+ resources:
+ {{- .Values.apps.jupyterhub.scheduling.userScheduler.resources | toYaml | trimSuffix "\n" | nindent 12 }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
new file mode 100755
index 00000000..161b9730
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/pdb.yaml
@@ -0,0 +1,13 @@
+{{- if .Values.apps.jupyterhub.scheduling.userScheduler.pdb.enabled -}}
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+ name: user-scheduler
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ minAvailable: {{ .Values.apps.jupyterhub.scheduling.userScheduler.pdb.minAvailable }}
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
new file mode 100755
index 00000000..825d7c6e
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/scheduling/user-scheduler/rbac.yaml
@@ -0,0 +1,64 @@
+{{- if .Values.apps.jupyterhub.scheduling.userScheduler.enabled -}}
+{{- if .Values.apps.jupyterhub.rbac.enabled }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: user-scheduler
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ .Release.Name }}-user-scheduler-base
+ labels:
+ {{- $_ := merge (dict "componentSuffix" "-base") . }}
+ {{- include "jupyterhub.labels" $_ | nindent 4 }}
+subjects:
+ - kind: ServiceAccount
+ name: user-scheduler
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: system:kube-scheduler
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ .Release.Name }}-user-scheduler-complementary
+ labels:
+ {{- $_ := merge (dict "componentSuffix" "-complementary") . }}
+ {{- include "jupyterhub.labels" $_ | nindent 4 }}
+rules:
+ # Support leader elections
+ - apiGroups: [""]
+ resourceNames: ["user-scheduler"]
+ resources: ["configmaps"]
+ verbs: ["get", "update"]
+ # Workaround for missing permission in system:kube-scheduler as of k8s 1.10.4
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ # Workaround for missing permission with rancher local-path-provisioner
+ - apiGroups: [""]
+    resources: ["persistentvolumes", "persistentvolumeclaims"]
+ verbs: ["update"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ .Release.Name }}-user-scheduler-complementary
+ labels:
+ {{- $_ := merge (dict "componentSuffix" "-complementary") . }}
+ {{- include "jupyterhub.labels" $_ | nindent 4 }}
+subjects:
+ - kind: ServiceAccount
+ name: user-scheduler
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: {{ .Release.Name }}-user-scheduler-complementary
+ apiGroup: rbac.authorization.k8s.io
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/singleuser/image-credentials-secret.yaml b/applications/jupyterhub/deploy/templates/singleuser/image-credentials-secret.yaml
new file mode 100755
index 00000000..cda02246
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/singleuser/image-credentials-secret.yaml
@@ -0,0 +1,30 @@
+{{- if .Values.apps.jupyterhub.singleuser.imagePullSecret.enabled }}
+kind: Secret
+apiVersion: v1
+metadata:
+ name: singleuser-image-credentials
+ labels:
+ {{- $_ := merge (dict "componentSuffix" "-image-credentials") . }}
+ {{- include "jupyterhub.labels" $_ | nindent 4 }}
+type: kubernetes.io/dockerconfigjson
+data:
+ .dockerconfigjson: {{ include "jupyterhub.dockersingleuserconfigjson" . }}
+{{- if .Values.apps.jupyterhub.prePuller.hook.enabled }}
+---
+kind: Secret
+apiVersion: v1
+metadata:
+ name: hook-singleuser-image-credentials
+ labels:
+ {{- $_ := merge (dict "componentPrefix" "hook-" "componentSuffix" "-image-credentials") . }}
+ {{- include "jupyterhub.labels" $_ | nindent 4 }}
+ hub.jupyter.org/deletable: "true"
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ "helm.sh/hook-weight": "-20"
+type: kubernetes.io/dockerconfigjson
+data:
+ .dockerconfigjson: {{ include "jupyterhub.dockersingleuserconfigjson" . }}
+{{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml b/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
new file mode 100755
index 00000000..cc9ec101
--- /dev/null
+++ b/applications/jupyterhub/deploy/templates/singleuser/netpol.yaml
@@ -0,0 +1,44 @@
+{{- if and .Values.apps.jupyterhub.singleuser.networkPolicy.enabled -}}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: singleuser
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "singleuser-server") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 6 }}
+ policyTypes:
+ - Ingress
+ - Egress
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ hub.jupyter.org/network-access-singleuser: "true"
+ ports:
+ - protocol: TCP
+ port: 8888
+ {{- /* Useful if you want to give user server access to pods from other namespaces */}}
+ {{- if .Values.apps.jupyterhub.singleuser.networkPolicy.ingress }}
+ {{- .Values.apps.jupyterhub.singleuser.networkPolicy.ingress | toYaml | trimSuffix "\n" | nindent 4 }}
+ {{- end }}
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ {{- /*
+ Override componentLabel because we need the label of the
+ destination, not the source
+ */}}
+ {{- $_ := merge (dict "componentLabel" "hub") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - protocol: TCP
+ port: 8081
+ {{- if .Values.apps.jupyterhub.singleuser.networkPolicy.egress }}
+ {{- .Values.apps.jupyterhub.singleuser.networkPolicy.egress | toYaml | trimSuffix "\n" | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/applications/jupyterhub/deploy/values.yaml b/applications/jupyterhub/deploy/values.yaml
new file mode 100755
index 00000000..08ec0c64
--- /dev/null
+++ b/applications/jupyterhub/deploy/values.yaml
@@ -0,0 +1,283 @@
+harness:
+ subdomain: jupyterhub
+ service:
+ auto: false
+ port: 80
+ name: proxy-public
+ deployment:
+ auto: false
+custom: {}
+hub:
+ allowNamedServers: true
+ namedServerLimitPerUser: 10
+ service:
+ type: ClusterIP
+ annotations: {}
+ ports:
+ nodePort:
+ loadBalancerIP:
+ baseUrl: /
+ cookieSecret:
+ publicURL:
+ initContainers: []
+ uid: 1000
+ fsGid: 1000
+ nodeSelector: {}
+ concurrentSpawnLimit: 64
+ consecutiveFailureLimit: 5
+ activeServerLimit:
+ image:
+ pullSecrets: false
+ imagePullSecret:
+ enabled: false
+ livenessProbe:
+ enabled: false
+ readinessProbe:
+ enabled: false
+ deploymentStrategy:
+ type: Recreate
+ rollingUpdate: null
+ db:
+ type: sqlite-pvc
+ upgrade: null
+ pvc:
+ annotations: {}
+ selector: {}
+ accessModes:
+ - ReadWriteOnce
+ storage: 1Gi
+ subPath: null
+ storageClassName: null
+ url: null
+ password: null
+ labels: {}
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/path: /hub/metrics
+ extraConfig:
+ timing: |
+ c.Spawner.port = 8000
+ c.Spawner.http_timeout = 300
+ c.Spawner.start_timeout = 300
+ c.JupyterHub.tornado_settings = { "headers": { }}
+ spawner: >-
+ c.Spawner.args = ["--debug", "--port=8000"]
+ extraConfigMap: {}
+ extraEnv: {}
+ extraContainers: []
+ extraVolumes: []
+ extraVolumeMounts: []
+ resources:
+ requests:
+ cpu: 200m
+ memory: 512Mi
+ services: {}
+ imagePullPolicy: IfNotPresent
+ pdb:
+ enabled: true
+ minAvailable: 1
+ networkPolicy:
+ enabled: false
+ egress:
+ - to:
+ - ipBlock:
+ cidr: 0.0.0.0/0
+rbac:
+ enabled: true
+proxy:
+ secretToken: b3fed077c7538cfc5e2a6469ddac7d43a18fc645789407b53e580b7342b968d8
+ service:
+ type: LoadBalancer
+ labels: {}
+ annotations: {}
+ nodePorts:
+ http: null
+ https: null
+ loadBalancerIP: null
+
+ chp:
+ image:
+ name: jupyterhub/configurable-http-proxy
+ tag: 4.0.1
+ pullPolicy: IfNotPresent
+ livenessProbe:
+ enabled: false
+ readinessProbe:
+ enabled: false
+ resources:
+ requests:
+ cpu: 200m
+ memory: 512Mi
+ nginx:
+ image:
+ name: quay.io/kubernetes-ingress-controller/nginx-ingress-controller
+ tag: 0.15.0
+ pullPolicy: IfNotPresent
+ proxyBodySize: 64m
+ resources: {}
+ lego:
+ image:
+ name: jetstack/kube-lego
+ tag: 0.1.7
+ pullPolicy: IfNotPresent
+ resources: {}
+ labels: {}
+ nodeSelector: {}
+ pdb:
+ enabled: true
+ minAvailable: 1
+ https:
+ enabled: true
+ type: letsencrypt
+ letsencrypt:
+ contactEmail: ''
+ manual:
+ key: null
+ cert: null
+ secret:
+ name: ''
+ key: tls.key
+ crt: tls.crt
+ hosts: []
+ networkPolicy:
+ enabled: false
+ egress:
+ - to:
+ - ipBlock:
+ cidr: 0.0.0.0/0
+auth:
+ type: tmp
+ whitelist:
+ users: null
+ admin:
+ access: true
+ users: null
+ dummy:
+ password: null
+ ldap:
+ dn:
+ search: {}
+ user: {}
+ user: {}
+ state:
+ enabled: false
+ cryptoKey: null
+singleuser:
+ extraTolerations: []
+ nodeSelector: {}
+ extraNodeAffinity:
+ required: []
+ preferred: []
+ extraPodAffinity:
+ required: []
+ preferred: []
+ extraPodAntiAffinity:
+ required: []
+ preferred: []
+ networkTools:
+ image:
+ name: jupyterhub/k8s-network-tools
+ tag: 0.9-b51ffeb
+ cloudMetadata:
+ enabled: false
+ ip: 169.254.169.254
+ networkPolicy:
+ enabled: false
+ egress:
+ - to:
+ - ipBlock:
+ cidr: 0.0.0.0/0
+ except:
+ - 169.254.169.254/32
+ events: true
+ extraAnnotations: {}
+ extraLabels: {}
+ extraEnv: {}
+ lifecycleHooks: null
+ initContainers: []
+ extraContainers: []
+ uid: 1000
+ fsGid: 100
+ serviceAccountName: null
+ storage:
+ type: dynamic
+ capacity: 2Mi
+ dynamic:
+ pvcNameTemplate: jupyter-{userid}
+ volumeNameTemplate: jupyter-{userid}
+ homeMountPath: /home/workspace
+ extraLabels: {}
+ image:
+ name: jupyter/base-notebook
+ tag: hub-1.1.0
+ pullPolicy: IfNotPresent
+ imagePullSecret:
+ enabled: false
+ startTimeout: 300
+ cpu:
+ limit: null
+ guarantee: null
+ memory:
+ limit: null
+ guarantee: 0.5G
+ extraResource:
+ limits: {}
+ guarantees: {}
+ cmd: jupyterhub-singleuser
+ defaultUrl: null
+scheduling:
+ userScheduler:
+ enabled: false
+ replicas: 1
+ logLevel: 4
+ image:
+ name: gcr.io/google_containers/kube-scheduler-amd64
+ tag: v1.11.2
+ nodeSelector: {}
+ pdb:
+ enabled: true
+ minAvailable: 1
+ resources:
+ requests:
+ cpu: 50m
+ memory: 256Mi
+ podPriority:
+ enabled: false
+ userPlaceholder:
+ enabled: true
+ replicas: 0
+ corePods:
+ nodeAffinity:
+ matchNodePurpose: prefer
+ userPods:
+ nodeAffinity:
+ matchNodePurpose: prefer
+prePuller:
+ hook:
+ enabled: true
+ extraEnv: {}
+ image:
+ name: jupyterhub/k8s-image-awaiter
+ tag: 0.9-b51ffeb
+ continuous:
+ enabled: false
+ extraImages: {}
+ pause:
+ image:
+ name: gcr.io/google_containers/pause
+ tag: '3.0'
+ingress:
+ enabled: false
+ annotations: {}
+ hosts: []
+ pathSuffix: ''
+ tls: null
+cull:
+ enabled: true
+ users: false
+ timeout: 600
+ every: 60
+ concurrency: 10
+ maxAge: 0
+debug:
+ enabled: false
diff --git a/applications/jupyterhub/src/chauthenticator/README.rst b/applications/jupyterhub/src/chauthenticator/README.rst
new file mode 100644
index 00000000..e370a397
--- /dev/null
+++ b/applications/jupyterhub/src/chauthenticator/README.rst
@@ -0,0 +1,21 @@
+cloudharness keycloak authenticator
+===================================
+
+Authenticator to use Jupyterhub with the keycloak gatekeeper.
+
+
+Running Tests:
+--------------
+
+.. code-block:: bash
+
+ pip install -r test-requirements.txt
+ pip install -e .
+ pytest
+
+PyTest does a lot of caching and may run into trouble if old files linger around.
+Use one or more of the following commands to clean up before running pytest.
+
+.. code-block:: bash
+
+ find ./ -name '*.pyc' -delete
diff --git a/applications/jupyterhub/src/chauthenticator/chauthenticator/__init__.py b/applications/jupyterhub/src/chauthenticator/chauthenticator/__init__.py
new file mode 100644
index 00000000..7c5267d7
--- /dev/null
+++ b/applications/jupyterhub/src/chauthenticator/chauthenticator/__init__.py
@@ -0,0 +1 @@
+from .auth import CloudHarnessAuthenticator
diff --git a/applications/jupyterhub/src/chauthenticator/chauthenticator/auth.py b/applications/jupyterhub/src/chauthenticator/chauthenticator/auth.py
new file mode 100644
index 00000000..284c988a
--- /dev/null
+++ b/applications/jupyterhub/src/chauthenticator/chauthenticator/auth.py
@@ -0,0 +1,87 @@
+import os
+
+from jupyterhub.auth import Authenticator
+from jupyterhub.handlers import BaseHandler
+from tornado import gen
+from traitlets import Bool
+from jupyterhub.utils import url_path_join
+from .utils import get_keycloak_data
+
+class CloudHarnessAuthenticateHandler(BaseHandler):
+ """
+ Handler for /chkclogin
+ Creates a new user based on the keycloak user, and auto starts their server
+ """
+ def initialize(self, force_new_server, process_user):
+ super().initialize()
+ self.force_new_server = force_new_server
+ self.process_user = process_user
+
+ @gen.coroutine
+ def get(self):
+ raw_user = yield self.get_current_user()
+ if raw_user:
+            if self.force_new_server and raw_user.running:
+ # Stop user's current server if it is running
+ # so we get a new one.
+ status = yield raw_user.spawner.poll_and_notify()
+ if status is None:
+ yield self.stop_single_user(raw_user)
+ else:
+            accessToken = self.request.cookies.get('accessToken', None)
+
+            if accessToken is None or accessToken.value == '-1':
+                self.redirect('/hub/logout')
+                return
+
+            accessToken = accessToken.value
+ keycloak_id, keycloak_data = get_keycloak_data(accessToken)
+ username = keycloak_id
+ raw_user = self.user_from_username(username)
+ self.set_login_cookie(raw_user)
+        user = yield gen.maybe_future(self.process_user(raw_user, self))
+        self.redirect(self.get_next_url(user))
+
+
+class CloudHarnessAuthenticator(Authenticator):
+ """
+ JupyterHub Authenticator for use with Cloud Harness
+ When JupyterHub is configured to use this authenticator, the client
+ needs to set the accessToken domain cookie
+ """
+
+ auto_login = True
+ login_service = 'chkc'
+
+ force_new_server = Bool(
+ True,
+ help="""
+        Stop the user's server and start a new one when visiting /hub/chkclogin.
+        When set to True, users going to /hub/chkclogin will *always* get a
+ new single-user server. When set to False, they'll be
+ redirected to their current session if one exists.
+ """,
+ config=True
+ )
+
+ def process_user(self, user, handler):
+ """
+ Do additional arbitrary things to the created user before spawn.
+ user is a user object, and handler is a CloudHarnessAuthenticateHandler
+ object. Should return the new user object.
+ This method can be a @tornado.gen.coroutine.
+ Note: This is primarily for overriding in subclasses
+ """
+ return user
+
+ def get_handlers(self, app):
+ # FIXME: How to do this better?
+ extra_settings = {
+ 'force_new_server': self.force_new_server,
+ 'process_user': self.process_user
+ }
+ return [
+ ('/chkclogin', CloudHarnessAuthenticateHandler, extra_settings)
+ ]
+
+ def login_url(self, base_url):
+ return url_path_join(base_url, 'chkclogin')
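
The authenticator registers its login handler at `/chkclogin`, so with the default hub base URL the login page is `/hub/chkclogin`. As a rough sketch (not part of this changeset), a JupyterHub instance with the `chauthenticator` package installed could enable it from its configuration file:

```python
# jupyterhub_config.py -- hypothetical example, not taken from this repository.
# Assumes the chauthenticator package defined above is installed in the hub image.
from chauthenticator import CloudHarnessAuthenticator

c.JupyterHub.authenticator_class = CloudHarnessAuthenticator
# Send returning users back to their running server instead of always
# stopping it and spawning a new one (the default force_new_server is True).
c.CloudHarnessAuthenticator.force_new_server = False
```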
diff --git a/applications/jupyterhub/src/chauthenticator/chauthenticator/utils.py b/applications/jupyterhub/src/chauthenticator/chauthenticator/utils.py
new file mode 100644
index 00000000..da5aec14
--- /dev/null
+++ b/applications/jupyterhub/src/chauthenticator/chauthenticator/utils.py
@@ -0,0 +1,44 @@
+import os
+import jwt
+import sys
+import json
+import requests
+from z2jh import get_config
+
+def get_keycloak_data(token):
+ print(f'Token: {token}')
+ if not token:
+ decoded_token = None
+        keycloak_id = -1 # No authorization --> no user --> only public workspaces
+ else:
+ decoded_token = _decode_token(token)
+ keycloak_id = decoded_token['sub']
+ return keycloak_id, decoded_token
+
+
+def _decode_token(token):
+ """
+ Check and retrieve authentication information from custom bearer token.
+ Returned value will be passed in 'token_info' parameter of your operation function, if there is one.
+ 'sub' or 'uid' will be set in 'user' parameter of your operation function, if there is one.
+
+    :param token: Token provided by Authorization header
+ :type token: str
+ :return: Decoded token information or None if token is invalid
+ :rtype: dict | None
+ """
+ SCHEMA = 'http://'
+ AUTH_SERVICE_HOST = os.getenv('ACCOUNTS_SERVICE_HOST')
+ AUTH_SERVICE_PORT = os.getenv('ACCOUNTS_SERVICE_PORT_HTTP')
+ AUTH_DOMAIN = f'{AUTH_SERVICE_HOST}:{AUTH_SERVICE_PORT}'
+ AUTH_REALM = get_config('namespace')
+ BASE_PATH = f'{AUTH_DOMAIN}/auth/realms/{AUTH_REALM}'
+ AUTH_PUBLIC_KEY_URL = f'{SCHEMA}{BASE_PATH}'
+ print(f'auth pub key url: {AUTH_PUBLIC_KEY_URL}')
+
+ KEY = json.loads(requests.get(AUTH_PUBLIC_KEY_URL, verify=False).text)['public_key']
+ KEY = b"-----BEGIN PUBLIC KEY-----\n" + str.encode(KEY) + b"\n-----END PUBLIC KEY-----"
+
+ decoded = jwt.decode(token, KEY, algorithms='RS256', audience='account')
+
+ return decoded
\ No newline at end of file
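
For illustration only, a minimal sketch of how `get_keycloak_data` could be called from a Tornado handler to resolve the current user; it mirrors what `CloudHarnessAuthenticateHandler.get` does in `auth.py` above (the helper name is hypothetical, and the accounts service environment variables must be set as they are inside the cluster):

```python
# Hypothetical usage sketch, not part of this changeset.
from chauthenticator.utils import get_keycloak_data

def resolve_keycloak_user(handler):
    """Return the Keycloak user id from the accessToken cookie, or None if absent."""
    cookie = handler.request.cookies.get('accessToken')
    if cookie is None or cookie.value == '-1':
        return None  # not logged in
    keycloak_id, claims = get_keycloak_data(cookie.value)
    # keycloak_id is the JWT 'sub' claim; auth.py uses it as the JupyterHub username
    return keycloak_id
```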
diff --git a/applications/jupyterhub/src/chauthenticator/setup.py b/applications/jupyterhub/src/chauthenticator/setup.py
new file mode 100644
index 00000000..cc9e3714
--- /dev/null
+++ b/applications/jupyterhub/src/chauthenticator/setup.py
@@ -0,0 +1,16 @@
+from setuptools import setup, find_packages
+
+setup(
+ name='chauthenticator',
+ version='0.1.0',
+ install_requires=[
+ 'oauthenticator',
+ 'python-jose'
+ ],
+ description='Authenticator to use Jupyterhub with the keycloak gatekeeper.',
+ url='',
+ author='Zoran Sinnema',
+ author_email='zoran@metacell.us',
+ license='BSD',
+ packages=['chauthenticator'],
+)
diff --git a/applications/jupyterhub/src/chauthenticator/test-requirements.txt b/applications/jupyterhub/src/chauthenticator/test-requirements.txt
new file mode 100644
index 00000000..4f4df2f9
--- /dev/null
+++ b/applications/jupyterhub/src/chauthenticator/test-requirements.txt
@@ -0,0 +1,8 @@
+codecov
+flake8
+# current pytest-tornado does not work with pytest 4
+pytest >=2.8,<4.0.0
+pytest-cov
+pytest-tornado
+
+jupyterhub>=0.9.0
diff --git a/applications/workflows/src/__init__.py b/applications/jupyterhub/src/harness_jupyter/harness_jupyter/__init__.py
old mode 100644
new mode 100755
similarity index 100%
rename from applications/workflows/src/__init__.py
rename to applications/jupyterhub/src/harness_jupyter/harness_jupyter/__init__.py
diff --git a/applications/jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py b/applications/jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py
new file mode 100755
index 00000000..24e07a68
--- /dev/null
+++ b/applications/jupyterhub/src/harness_jupyter/harness_jupyter/jupyterhub.py
@@ -0,0 +1,51 @@
+import logging
+import sys
+import imp
+
+from kubespawner.spawner import KubeSpawner
+
+def harness_hub():
+ """Wraps the method to change spawner configuration"""
+ KubeSpawner.get_pod_manifest_base = KubeSpawner.get_pod_manifest
+ KubeSpawner.get_pod_manifest = spawner_pod_manifest
+
+def spawner_pod_manifest(self: KubeSpawner):
+ print("Cloudharness: changing pod manifest")
+ change_pod_manifest(self)
+
+ return KubeSpawner.get_pod_manifest_base(self)
+
+def change_pod_manifest(self: KubeSpawner):
+ subdomain = self.handler.request.host.split('.')[0]
+ try:
+ app_config = self.config['apps']
+ registry = self.config['registry']
+ for app in app_config.values():
+ if 'harness' in app:
+
+ harness = app['harness']
+ if 'jupyterhub' in harness and harness['jupyterhub']\
+ and 'subdomain' in harness and harness['subdomain'] == subdomain:
+                    print('Change image to %s' % harness['deployment']['image'])
+ self.image = harness['deployment']['image']
+ if registry['name'] in self.image and registry['secret']:
+ self.image_pull_secrets = registry['secret']
+ if 'args' in harness['jupyterhub']:
+ self.args = harness['jupyterhub']['args']
+
+ # check if there is an applicationHook defined in the values.yaml
+ # if so then execute the applicationHook function with "self" as parameter
+ #
+ # e.g.
+ # jupyterhub:
+ # applicationHook: "jupyter.change_pod_manifest"
+ #
+ # this will execute jupyter.change_pod_manifest(self=self)
+ if 'applicationHook' in harness['jupyterhub']:
+ func_name = harness['jupyterhub']['applicationHook'].split('.')
+ module = __import__('.'.join(func_name[:-1]))
+ f = getattr(module, func_name[-1])
+ f(self=self)
+ break
+ except Exception as e:
+ logging.error("Harness error changing manifest", exc_info=True)
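
The `applicationHook` lookup above imports the module named in the application's values and calls the referenced function with the spawner as `self`. A hypothetical hook module (the module name and values key shown are illustrative, not part of this changeset) could look like:

```python
# jupyter.py -- hypothetical module referenced from an application's values.yaml as
#   harness.jupyterhub.applicationHook: "jupyter.change_pod_manifest"
from kubespawner.spawner import KubeSpawner

def change_pod_manifest(self: KubeSpawner):
    # 'self' is the KubeSpawner for the user being spawned; adjust it here
    # before the pod manifest is generated, e.g. mount a scratch volume.
    self.volumes = (self.volumes or []) + [{'name': 'scratch', 'emptyDir': {}}]
    self.volume_mounts = (self.volume_mounts or []) + [
        {'name': 'scratch', 'mountPath': '/opt/scratch'}
    ]
```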
diff --git a/applications/jupyterhub/src/harness_jupyter/setup.py b/applications/jupyterhub/src/harness_jupyter/setup.py
new file mode 100755
index 00000000..d8e03b51
--- /dev/null
+++ b/applications/jupyterhub/src/harness_jupyter/setup.py
@@ -0,0 +1,17 @@
+from setuptools import setup, find_packages
+
+REQUIREMENTS = [
+ 'jupyterhub-kubespawner',
+]
+
+setup(
+ name='harness_jupyter',
+ version='0.1',
+ packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
+ url='',
+ license='MIT',
+ install_requires=REQUIREMENTS,
+ author='Filippo Ledda',
+ author_email='filippo@metacell.us',
+ description='Utilities to integrate Cloud Harness functionalities with Jupyter applications'
+)
diff --git a/applications/samples/api/samples.yaml b/applications/samples/api/samples.yaml
index 6cf5e820..f957880f 100644
--- a/applications/samples/api/samples.yaml
+++ b/applications/samples/api/samples.yaml
@@ -35,6 +35,23 @@ paths:
"400":
description: bad input parameter
+ /error:
+ get:
+ summary: test sentry is working
+ operationId: error
+ tags:
+ - workflows
+ responses:
+ "500":
+ description: Sentry entry should come!
+ "200":
+ description: This won't happen
+ content:
+ application/json:
+ schema:
+ type: string
+
+
/operation_sync:
get:
summary: Send a synchronous operation
diff --git a/applications/samples/deploy/values.yaml b/applications/samples/deploy/values.yaml
index 91949d61..ecff24d9 100644
--- a/applications/samples/deploy/values.yaml
+++ b/applications/samples/deploy/values.yaml
@@ -1,4 +1,11 @@
-port: 8080
-subdomain: samples
-autodeploy: true
-autoservice: true
\ No newline at end of file
+harness:
+ subdomain: samples
+ secured: false
+ sentry: true
+ port: 80
+ service:
+ port: 8080
+ auto: true
+ deployment:
+ auto: true
+ port: 8080
diff --git a/applications/samples/server/.openapi-generator-ignore b/applications/samples/server/.openapi-generator-ignore
index 7484ee59..5cc13cc6 100644
--- a/applications/samples/server/.openapi-generator-ignore
+++ b/applications/samples/server/.openapi-generator-ignore
@@ -21,3 +21,7 @@
#docs/*.md
# Then explicitly reverse the ignore rule for a single file:
#!docs/README.md
+setup.py
+*/controllers/*
+*/models/*
+Dockerfile
\ No newline at end of file
diff --git a/applications/samples/server/Dockerfile b/applications/samples/server/Dockerfile
index 006e6e33..d74b98f7 100644
--- a/applications/samples/server/Dockerfile
+++ b/applications/samples/server/Dockerfile
@@ -1,4 +1,6 @@
-FROM python:3-alpine
+ARG REGISTRY
+ARG TAG=latest
+FROM ${REGISTRY}cloudharness-base:${TAG}
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app
diff --git a/applications/samples/server/api_samples/controllers/auth_controller.py b/applications/samples/server/api_samples/controllers/auth_controller.py
index 9d7e8159..6561b554 100644
--- a/applications/samples/server/api_samples/controllers/auth_controller.py
+++ b/applications/samples/server/api_samples/controllers/auth_controller.py
@@ -6,11 +6,11 @@
def valid_token(): # noqa: E501
- """Check if the token is valid. Get a token by logging into the base url
+ """Check if the token is valid. Get a token by logging into the dashboard
Check if the token is valid # noqa: E501
:rtype: List[Valid]
"""
- return 'do some magic!'
+ return 'OK!'
diff --git a/applications/samples/server/api_samples/controllers/workflows_controller.py b/applications/samples/server/api_samples/controllers/workflows_controller.py
index 3f5ccfbf..cf4fea81 100644
--- a/applications/samples/server/api_samples/controllers/workflows_controller.py
+++ b/applications/samples/server/api_samples/controllers/workflows_controller.py
@@ -3,40 +3,81 @@
from api_samples.models.inline_response202 import InlineResponse202 # noqa: E501
from api_samples import util
+from api_samples.models import InlineResponse202
+from api_samples.models.inline_response202_task import InlineResponse202Task
+from flask.json import jsonify
+
+from cloudharness import log
+
+try:
+ from cloudharness.workflows import operations, tasks
+except Exception as e:
+ log.error("Cannot start the workflows module. This is probably related to a problem with the kubectl configuration", e)
def submit_async(): # noqa: E501
- """Send an asynchronous operation
+ """Send an asynchronous operation
# noqa: E501
:rtype: InlineResponse202
"""
- return 'do some magic!'
+ shared_directory = '/mnt/shared'
+ task_write = tasks.CustomTask('download-file', 'workflows-extract-download', url='https://raw.githubusercontent.com/openworm/org.geppetto/master/README.md')
+ task_print = tasks.CustomTask('print-file', 'workflows-print-file', file_path=shared_directory + '/README.md')
+ op = operations.PipelineOperation('test-custom-connected-op-', (task_write, task_print), shared_directory=shared_directory)
+
+ submitted = op.execute()
+ if not op.is_error():
+ return InlineResponse202(task= InlineResponse202Task(href=op.get_operation_update_url(), name=submitted.name)), 202
+ else:
+ return 'Error submitting operation', 500
def submit_sync(): # noqa: E501
- """Send a synchronous operation
+ """Send a synchronous operation
# noqa: E501
:rtype: str
"""
- return 'do some magic!'
+ task = tasks.CustomTask('download-file', 'workflows-extract-download', url='https://www.metacell.us')
+
+ op = operations.DistributedSyncOperation('test-sync-op-', task)
+ workflow = op.execute()
+ return workflow.raw
-def submit_sync_with_results(a=None, b=None): # noqa: E501
+def submit_sync_with_results(a=1, b=2): # noqa: E501
"""Send a synchronous operation and get results using the event queue. Just a sum, but in the cloud
# noqa: E501
:param a: first number to sum
- :type a:
+ :type a: float
:param b: second number to sum
- :type b:
+ :type b: float
+
+ :rtype: str
+ """
+ task = tasks.CustomTask('test-sum', 'samples-sum', a=a, b=b)
+ try:
+ op = operations.DistributedSyncOperationWithResults('test-sync-op-results-', task)
+ result = op.execute()
+ return result
+ except Exception as e:
+ return jsonify(str(e)), 200
+
+
+
+def error(): # noqa: E501
+ """test sentry is working
+
+ # noqa: E501
+
:rtype: str
"""
- return 'do some magic!'
+ return "a"[2]
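
For reference, here is a minimal client-side sketch of how the endpoints served by this controller can be exercised; it assumes the `requests` package and the base URL declared in the sample OpenAPI spec, so adjust both to your deployment.

```python
import requests

# Base URL as declared in the samples OpenAPI spec (adjust to your domain).
BASE = "https://samples.cloudharness.metacell.us/api"

# Synchronous operation: the response body is the raw Argo workflow.
print(requests.get(f"{BASE}/operation_sync").json())

# Synchronous operation with results: a sum computed by a workflow task.
print(requests.get(f"{BASE}/operation_sync_results", params={"a": 10, "b": 10}).json())

# Asynchronous operation: expect a 202 with a link to poll for the status.
print(requests.get(f"{BASE}/operation_async").json())

# Error endpoint: raises an IndexError on purpose, so a 500 and a Sentry event are expected.
print(requests.get(f"{BASE}/error").status_code)
```
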
diff --git a/applications/samples/server/api_samples/models/inline_response202.py b/applications/samples/server/api_samples/models/inline_response202.py
index 597ed98f..9d389979 100644
--- a/applications/samples/server/api_samples/models/inline_response202.py
+++ b/applications/samples/server/api_samples/models/inline_response202.py
@@ -9,7 +9,7 @@
from api_samples.models.inline_response202_task import InlineResponse202Task
from api_samples import util
-from api_samples.models.inline_response202_task import InlineResponse202Task # noqa: E501
+
class InlineResponse202(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
diff --git a/applications/samples/server/api_samples/openapi/openapi.yaml b/applications/samples/server/api_samples/openapi/openapi.yaml
index 126520be..9f8d242d 100644
--- a/applications/samples/server/api_samples/openapi/openapi.yaml
+++ b/applications/samples/server/api_samples/openapi/openapi.yaml
@@ -13,6 +13,22 @@ tags:
- name: auth
- name: workflows
paths:
+ /error:
+ get:
+ operationId: error
+ responses:
+ "500":
+        description: A Sentry event should be recorded!
+ "200":
+ content:
+ application/json:
+ schema:
+ type: string
+ description: This won't happen
+ summary: test sentry is working
+ tags:
+ - workflows
+ x-openapi-router-controller: api_samples.controllers.workflows_controller
/operation_async:
get:
operationId: submit_async
diff --git a/applications/workflows/src/workflows_api/__init__.py b/applications/samples/server/api_samples/service/__init__.py
similarity index 100%
rename from applications/workflows/src/workflows_api/__init__.py
rename to applications/samples/server/api_samples/service/__init__.py
diff --git a/applications/samples/src/api_samples/service/security_service.py b/applications/samples/server/api_samples/service/security_service.py
similarity index 100%
rename from applications/samples/src/api_samples/service/security_service.py
rename to applications/samples/server/api_samples/service/security_service.py
diff --git a/applications/samples/server/api_samples/test/test_auth_controller.py b/applications/samples/server/api_samples/test/test_auth_controller.py
index 962c63cd..d68f15c1 100644
--- a/applications/samples/server/api_samples/test/test_auth_controller.py
+++ b/applications/samples/server/api_samples/test/test_auth_controller.py
@@ -1,7 +1,6 @@
# coding: utf-8
from __future__ import absolute_import
-import unittest
from flask import json
from six import BytesIO
@@ -16,19 +15,15 @@ class TestAuthController(BaseTestCase):
def test_valid_token(self):
"""Test case for valid_token
- Check if the token is valid. Get a token by logging into the base url
+ Check if the token is valid
"""
- headers = {
- 'Accept': 'application/json',
- 'Authorization': 'Bearer special-key',
- }
response = self.client.open(
- '/api/valid',
- method='GET',
- headers=headers)
+ '/0.1.0/valid',
+ method='GET')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
+ import unittest
unittest.main()
diff --git a/applications/samples/src/api_samples/test/test_default_controller.py b/applications/samples/server/api_samples/test/test_default_controller.py
similarity index 100%
rename from applications/samples/src/api_samples/test/test_default_controller.py
rename to applications/samples/server/api_samples/test/test_default_controller.py
diff --git a/applications/samples/server/api_samples/test/test_workflows_controller.py b/applications/samples/server/api_samples/test/test_workflows_controller.py
index 07760241..4bb1998e 100644
--- a/applications/samples/server/api_samples/test/test_workflows_controller.py
+++ b/applications/samples/server/api_samples/test/test_workflows_controller.py
@@ -1,7 +1,6 @@
# coding: utf-8
from __future__ import absolute_import
-import unittest
from flask import json
from six import BytesIO
@@ -13,54 +12,29 @@
class TestWorkflowsController(BaseTestCase):
"""WorkflowsController integration test stubs"""
- def test_submit_async(self):
- """Test case for submit_async
+ def test_operation_submit_async(self):
+ """Test case for operation_submit_async
- Send an asynchronous operation
+ Send an asynchronous operation
"""
- headers = {
- 'Accept': 'application/json',
- }
response = self.client.open(
- '/api/operation_async',
- method='GET',
- headers=headers)
+ '/0.1.0/operation_async',
+ method='POST')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
- def test_submit_sync(self):
- """Test case for submit_sync
+ def test_operation_submit_sync(self):
+ """Test case for operation_submit_sync
- Send a synchronous operation
+ Send a synchronous operation
"""
- headers = {
- 'Accept': 'application/json',
- }
response = self.client.open(
- '/api/operation_sync',
- method='GET',
- headers=headers)
- self.assert200(response,
- 'Response body is : ' + response.data.decode('utf-8'))
-
- def test_submit_sync_with_results(self):
- """Test case for submit_sync_with_results
-
- Send a synchronous operation and get results using the event queue. Just a sum, but in the cloud
- """
- query_string = [('a', 10),
- ('b', 10)]
- headers = {
- 'Accept': 'application/json',
- }
- response = self.client.open(
- '/api/operation_sync_results',
- method='GET',
- headers=headers,
- query_string=query_string)
+ '/0.1.0/operation_sync',
+ method='POST')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
+ import unittest
unittest.main()
diff --git a/applications/samples/server/setup.py b/applications/samples/server/setup.py
index e1d27372..3e72420c 100644
--- a/applications/samples/server/setup.py
+++ b/applications/samples/server/setup.py
@@ -16,7 +16,9 @@
REQUIRES = [
"connexion>=2.0.2",
"swagger-ui-bundle>=0.0.2",
- "python_dateutil>=2.6.0"
+ "python_dateutil>=2.6.0",
+ "pyjwt>=1.7.1",
+ "cloudharness"
]
setup(
diff --git a/applications/samples/src/www/index.html b/applications/samples/server/www/index.html
similarity index 100%
rename from applications/samples/src/www/index.html
rename to applications/samples/server/www/index.html
diff --git a/applications/samples/src/api_samples/__main__.py b/applications/samples/src/api_samples/__main__.py
deleted file mode 100644
index 9df35748..00000000
--- a/applications/samples/src/api_samples/__main__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env python3
-
-import connexion
-
-from api_samples import encoder
-
-
-def main():
- app = connexion.App(__name__, specification_dir='./openapi/')
- app.app.json_encoder = encoder.JSONEncoder
- app.add_api('openapi.yaml',
- arguments={'title': 'CloudHarness Sample API'},
- pythonic_params=True)
- app.run(port=8080)
-
-
-if __name__ == '__main__':
- main()
diff --git a/applications/samples/src/api_samples/controllers/auth_controller.py b/applications/samples/src/api_samples/controllers/auth_controller.py
deleted file mode 100644
index 6561b554..00000000
--- a/applications/samples/src/api_samples/controllers/auth_controller.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import connexion
-import six
-
-from api_samples.models.valid import Valid # noqa: E501
-from api_samples import util
-
-
-def valid_token(): # noqa: E501
- """Check if the token is valid. Get a token by logging into the dashboard
-
- Check if the token is valid # noqa: E501
-
-
- :rtype: List[Valid]
- """
- return 'OK!'
diff --git a/applications/samples/src/api_samples/controllers/workflows_controller.py b/applications/samples/src/api_samples/controllers/workflows_controller.py
deleted file mode 100644
index 24744603..00000000
--- a/applications/samples/src/api_samples/controllers/workflows_controller.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import connexion
-import six
-
-from api_samples.models.inline_response202 import InlineResponse202 # noqa: E501
-from api_samples import util
-from api_samples.models import InlineResponse202
-from api_samples.models.inline_response202_task import InlineResponse202Task
-from flask.json import jsonify
-
-from cloudharness import log
-
-try:
- from cloudharness.workflows import operations, tasks
-except Exception as e:
- log.error("Cannot start workflows module. Probably this is related some problem with the kubectl configuration", e)
-
-
-def submit_async(): # noqa: E501
- """Send an asyncronous operation
-
- # noqa: E501
-
-
- :rtype: InlineResponse202
- """
- shared_directory = '/mnt/shared'
- task_write = tasks.CustomTask('download-file', 'workflows-extract-download', url='https://raw.githubusercontent.com/openworm/org.geppetto/master/README.md')
- task_print = tasks.CustomTask('print-file', 'workflows-print-file', file_path=shared_directory + '/README.md')
- op = operations.PipelineOperation('test-custom-connected-op-', (task_write, task_print), shared_directory=shared_directory)
-
- submitted = op.execute()
- if not op.is_error():
- return InlineResponse202(task= InlineResponse202Task(href=op.get_operation_update_url(), name=submitted.name)), 202
- else:
- return 'Error submitting operation', 500
-
-
-def submit_sync(): # noqa: E501
- """Send a syncronous operation
-
- # noqa: E501
-
-
- :rtype: str
- """
- task = tasks.CustomTask('download-file', 'workflows-extract-download', url='https://www.metacell.us')
-
- op = operations.DistributedSyncOperation('test-sync-op-', task)
- workflow = op.execute()
- return workflow.raw
-
-
-def submit_sync_with_results(a=1, b=2): # noqa: E501
- """Send a synchronous operation and get results using the event queue. Just a sum, but in the cloud
-
- # noqa: E501
-
- :param a: first number to sum
- :type a: float
- :param b: second number to sum
- :type b: float
-
- :rtype: str
- """
- task = tasks.CustomTask('test-sum', 'samples-sum', a=a, b=b)
- try:
- op = operations.DistributedSyncOperationWithResults('test-sync-op-results-', task)
- result = op.execute()
- return result
- except Exception as e:
- return jsonify(str(e)), 200
diff --git a/applications/samples/src/api_samples/models/__init__.py b/applications/samples/src/api_samples/models/__init__.py
deleted file mode 100644
index 260fdebe..00000000
--- a/applications/samples/src/api_samples/models/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# coding: utf-8
-
-# flake8: noqa
-from __future__ import absolute_import
-# import models into model package
-from api_samples.models.inline_response202 import InlineResponse202
-from api_samples.models.inline_response202_task import InlineResponse202Task
-from api_samples.models.valid import Valid
diff --git a/applications/samples/src/api_samples/models/inline_response202.py b/applications/samples/src/api_samples/models/inline_response202.py
deleted file mode 100644
index 9d389979..00000000
--- a/applications/samples/src/api_samples/models/inline_response202.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-from datetime import date, datetime # noqa: F401
-
-from typing import List, Dict # noqa: F401
-
-from api_samples.models.base_model_ import Model
-from api_samples.models.inline_response202_task import InlineResponse202Task
-from api_samples import util
-
-
-
-class InlineResponse202(Model):
- """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
-
- Do not edit the class manually.
- """
-
- def __init__(self, task=None): # noqa: E501
- """InlineResponse202 - a model defined in OpenAPI
-
- :param task: The task of this InlineResponse202. # noqa: E501
- :type task: InlineResponse202Task
- """
- self.openapi_types = {
- 'task': InlineResponse202Task
- }
-
- self.attribute_map = {
- 'task': 'task'
- }
-
- self._task = task
-
- @classmethod
- def from_dict(cls, dikt) -> 'InlineResponse202':
- """Returns the dict as a model
-
- :param dikt: A dict.
- :type: dict
- :return: The inline_response_202 of this InlineResponse202. # noqa: E501
- :rtype: InlineResponse202
- """
- return util.deserialize_model(dikt, cls)
-
- @property
- def task(self):
- """Gets the task of this InlineResponse202.
-
-
- :return: The task of this InlineResponse202.
- :rtype: InlineResponse202Task
- """
- return self._task
-
- @task.setter
- def task(self, task):
- """Sets the task of this InlineResponse202.
-
-
- :param task: The task of this InlineResponse202.
- :type task: InlineResponse202Task
- """
-
- self._task = task
diff --git a/applications/samples/src/api_samples/models/inline_response202_task.py b/applications/samples/src/api_samples/models/inline_response202_task.py
deleted file mode 100644
index 465a8824..00000000
--- a/applications/samples/src/api_samples/models/inline_response202_task.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-from datetime import date, datetime # noqa: F401
-
-from typing import List, Dict # noqa: F401
-
-from api_samples.models.base_model_ import Model
-from api_samples import util
-
-
-class InlineResponse202Task(Model):
- """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
-
- Do not edit the class manually.
- """
-
- def __init__(self, href=None, name=None): # noqa: E501
- """InlineResponse202Task - a model defined in OpenAPI
-
- :param href: The href of this InlineResponse202Task. # noqa: E501
- :type href: str
- :param name: The name of this InlineResponse202Task. # noqa: E501
- :type name: str
- """
- self.openapi_types = {
- 'href': str,
- 'name': str
- }
-
- self.attribute_map = {
- 'href': 'href',
- 'name': 'name'
- }
-
- self._href = href
- self._name = name
-
- @classmethod
- def from_dict(cls, dikt) -> 'InlineResponse202Task':
- """Returns the dict as a model
-
- :param dikt: A dict.
- :type: dict
- :return: The inline_response_202_task of this InlineResponse202Task. # noqa: E501
- :rtype: InlineResponse202Task
- """
- return util.deserialize_model(dikt, cls)
-
- @property
- def href(self):
- """Gets the href of this InlineResponse202Task.
-
- the url where to check the operation status # noqa: E501
-
- :return: The href of this InlineResponse202Task.
- :rtype: str
- """
- return self._href
-
- @href.setter
- def href(self, href):
- """Sets the href of this InlineResponse202Task.
-
- the url where to check the operation status # noqa: E501
-
- :param href: The href of this InlineResponse202Task.
- :type href: str
- """
-
- self._href = href
-
- @property
- def name(self):
- """Gets the name of this InlineResponse202Task.
-
-
- :return: The name of this InlineResponse202Task.
- :rtype: str
- """
- return self._name
-
- @name.setter
- def name(self, name):
- """Sets the name of this InlineResponse202Task.
-
-
- :param name: The name of this InlineResponse202Task.
- :type name: str
- """
-
- self._name = name
diff --git a/applications/samples/src/api_samples/models/valid.py b/applications/samples/src/api_samples/models/valid.py
deleted file mode 100644
index eae6c5f4..00000000
--- a/applications/samples/src/api_samples/models/valid.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-from datetime import date, datetime # noqa: F401
-
-from typing import List, Dict # noqa: F401
-
-from api_samples.models.base_model_ import Model
-from api_samples import util
-
-
-class Valid(Model):
- """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
-
- Do not edit the class manually.
- """
-
- def __init__(self, response=None): # noqa: E501
- """Valid - a model defined in OpenAPI
-
- :param response: The response of this Valid. # noqa: E501
- :type response: str
- """
- self.openapi_types = {
- 'response': str
- }
-
- self.attribute_map = {
- 'response': 'response'
- }
-
- self._response = response
-
- @classmethod
- def from_dict(cls, dikt) -> 'Valid':
- """Returns the dict as a model
-
- :param dikt: A dict.
- :type: dict
- :return: The Valid of this Valid. # noqa: E501
- :rtype: Valid
- """
- return util.deserialize_model(dikt, cls)
-
- @property
- def response(self):
- """Gets the response of this Valid.
-
-
- :return: The response of this Valid.
- :rtype: str
- """
- return self._response
-
- @response.setter
- def response(self, response):
- """Sets the response of this Valid.
-
-
- :param response: The response of this Valid.
- :type response: str
- """
-
- self._response = response
diff --git a/applications/samples/src/api_samples/openapi/openapi.yaml b/applications/samples/src/api_samples/openapi/openapi.yaml
deleted file mode 100644
index 126520be..00000000
--- a/applications/samples/src/api_samples/openapi/openapi.yaml
+++ /dev/null
@@ -1,134 +0,0 @@
-openapi: 3.0.0
-info:
- contact:
- email: cloudharness@metacell.us
- description: CloudHarness Sample api
- license:
- name: UNLICENSED
- title: CloudHarness Sample API
- version: 0.1.0
-servers:
-- url: https://samples.cloudharness.metacell.us/api
-tags:
-- name: auth
-- name: workflows
-paths:
- /operation_async:
- get:
- operationId: submit_async
- responses:
- "202":
- content:
- application/json:
- schema:
- $ref: '#/components/schemas/inline_response_202'
- description: Submitted operation. See also https://restfulapi.net/http-status-202-accepted/
- summary: Send an asynchronous operation
- tags:
- - workflows
- x-openapi-router-controller: api_samples.controllers.workflows_controller
- /operation_sync:
- get:
- operationId: submit_sync
- responses:
- "200":
- content:
- application/json:
- schema:
- type: string
- description: Operation result
- summary: Send a synchronous operation
- tags:
- - workflows
- x-openapi-router-controller: api_samples.controllers.workflows_controller
- /operation_sync_results:
- get:
- operationId: submit_sync_with_results
- parameters:
- - description: first number to sum
- example: 10
- explode: true
- in: query
- name: a
- required: false
- schema:
- type: number
- style: form
- - description: second number to sum
- example: 10
- explode: true
- in: query
- name: b
- required: false
- schema:
- type: number
- style: form
- responses:
- "200":
- content:
- application/json:
- schema:
- type: string
- description: Operation result
- summary: Send a synchronous operation and get results using the event queue.
- Just a sum, but in the cloud
- tags:
- - workflows
- x-openapi-router-controller: api_samples.controllers.workflows_controller
- /valid:
- get:
- description: |
- Check if the token is valid
- operationId: valid_token
- responses:
- "200":
- content:
- application/json:
- schema:
- items:
- $ref: '#/components/schemas/Valid'
- type: array
- description: Check if token is valid
- "400":
- description: bad input parameter
- security:
- - bearerAuth: []
- summary: Check if the token is valid. Get a token by logging into the base url
- tags:
- - auth
- x-openapi-router-controller: api_samples.controllers.auth_controller
-components:
- schemas:
- Valid:
- example:
- response: response
- properties:
- response:
- type: string
- type: object
- inline_response_202_task:
- example:
- name: my-op
- href: http://workflows.cloudharness.metacell.us/api/operation/my-op
- properties:
- href:
- description: the url where to check the operation status
- example: http://workflows.cloudharness.metacell.us/api/operation/my-op
- type: string
- name:
- example: my-op
- type: string
- inline_response_202:
- example:
- task:
- name: my-op
- href: http://workflows.cloudharness.metacell.us/api/operation/my-op
- properties:
- task:
- $ref: '#/components/schemas/inline_response_202_task'
- securitySchemes:
- bearerAuth:
- bearerFormat: JWT
- scheme: bearer
- type: http
- x-bearerInfoFunc: cloudharness.auth.decode_token
diff --git a/applications/samples/src/api_samples/test/test_auth_controller.py b/applications/samples/src/api_samples/test/test_auth_controller.py
deleted file mode 100644
index d68f15c1..00000000
--- a/applications/samples/src/api_samples/test/test_auth_controller.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-
-from flask import json
-from six import BytesIO
-
-from api_samples.models.valid import Valid # noqa: E501
-from api_samples.test import BaseTestCase
-
-
-class TestAuthController(BaseTestCase):
- """AuthController integration test stubs"""
-
- def test_valid_token(self):
- """Test case for valid_token
-
- Check if the token is valid
- """
- response = self.client.open(
- '/0.1.0/valid',
- method='GET')
- self.assert200(response,
- 'Response body is : ' + response.data.decode('utf-8'))
-
-
-if __name__ == '__main__':
- import unittest
- unittest.main()
diff --git a/applications/samples/src/api_samples/test/test_workflows_controller.py b/applications/samples/src/api_samples/test/test_workflows_controller.py
deleted file mode 100644
index 4bb1998e..00000000
--- a/applications/samples/src/api_samples/test/test_workflows_controller.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-
-from flask import json
-from six import BytesIO
-
-from api_samples.models.inline_response202 import InlineResponse202 # noqa: E501
-from api_samples.test import BaseTestCase
-
-
-class TestWorkflowsController(BaseTestCase):
- """WorkflowsController integration test stubs"""
-
- def test_operation_submit_async(self):
- """Test case for operation_submit_async
-
- Send an asyncronous operation
- """
- response = self.client.open(
- '/0.1.0/operation_async',
- method='POST')
- self.assert200(response,
- 'Response body is : ' + response.data.decode('utf-8'))
-
- def test_operation_submit_sync(self):
- """Test case for operation_submit_sync
-
- Send a syncronous operation
- """
- response = self.client.open(
- '/0.1.0/operation_sync',
- method='POST')
- self.assert200(response,
- 'Response body is : ' + response.data.decode('utf-8'))
-
-
-if __name__ == '__main__':
- import unittest
- unittest.main()
diff --git a/applications/samples/src/requirements.txt b/applications/samples/src/requirements.txt
deleted file mode 100644
index 4fe6c15c..00000000
--- a/applications/samples/src/requirements.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-connexion >= 2.6.0; python_version>="3.6"
-connexion >= 2.3.0; python_version=="3.5"
-connexion >= 2.3.0; python_version=="3.4"
-connexion == 2.4.0; python_version<="2.7"
-swagger-ui-bundle >= 0.0.2
-python_dateutil >= 2.6.0
-setuptools >= 21.0.0
-pyjwt>=1.7.1
\ No newline at end of file
diff --git a/applications/samples/tasks/sum/Dockerfile b/applications/samples/tasks/sum/Dockerfile
index 2c344f77..1675a8fc 100644
--- a/applications/samples/tasks/sum/Dockerfile
+++ b/applications/samples/tasks/sum/Dockerfile
@@ -1,4 +1,4 @@
-ARG REGISTRY=r.cfcr.io/tarelli/
+ARG REGISTRY
ARG TAG=latest
FROM ${REGISTRY}cloudharness-base:${TAG}
diff --git a/applications/sentry/Dockerfile b/applications/sentry/Dockerfile
new file mode 100644
index 00000000..9b190cf5
--- /dev/null
+++ b/applications/sentry/Dockerfile
@@ -0,0 +1,18 @@
+FROM sentry:9.1.2
+
+ADD sentryrunner.sh ./sentryrunner.sh
+RUN chmod +x ./sentryrunner.sh
+
+ENV SENTRY_REDIS_HOST=sentry-redis-host
+ENV SENTRY_REDIS_PORT=6379
+ENV SENTRY_POSTGRES_HOST=sentry-postgres-host
+ENV SENTRY_POSTGRES_PORT=5432
+ENV SENTRY_DB_NAME=sentry
+ENV SENTRY_DB_USER=sentry
+ENV SENTRY_DB_PASSWORD=secret
+ENV SENTRY_EMAIL_HOST=sentry-postfix-host
+ENV SENTRY_ADMIN_USER=sentry
+ENV SENTRY_ADMIN_PASSWORD=secret
+ENV SENTRY_EMAIL_FROM=sentry
+
+ENTRYPOINT ["./sentryrunner.sh"]
diff --git a/applications/sentry/README.md b/applications/sentry/README.md
new file mode 100644
index 00000000..286a5a2b
--- /dev/null
+++ b/applications/sentry/README.md
@@ -0,0 +1,18 @@
+# Sentry
+
+Sentry provides self-hosted and cloud-based error monitoring that helps all software
+teams discover, triage, and prioritize errors in real-time.
+
+see [https://sentry.io/](https://sentry.io/)
+
+## Dashboard
+To open the Sentry dashboard, log in at https://sentry.<your-domain>/
+
+For example [https://sentry.osb.local/](https://sentry.osb.local/)
+
+credentials:
+```
+user: sentry@<your-domain>
+password: secret
+
+```
\ No newline at end of file
diff --git a/applications/sentry/deploy/templates/postfix.yaml b/applications/sentry/deploy/templates/postfix.yaml
new file mode 100644
index 00000000..82107bb5
--- /dev/null
+++ b/applications/sentry/deploy/templates/postfix.yaml
@@ -0,0 +1,61 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Values.apps.sentry.postfix.name }}
+ labels:
+ app: {{ .Values.apps.sentry.postfix.name }}
+{{ include "deploy_utils.labels" $ | indent 4 }}
+spec:
+ type: ClusterIP
+ ports:
+ - port: {{ .Values.apps.sentry.postfix.port }}
+ selector:
+ app: {{ .Values.apps.sentry.postfix.name }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ .Values.apps.sentry.postfix.name | quote }}
+ labels:
+ app: {{ .Values.apps.sentry.postfix.name | quote }}
+{{ include "deploy_utils.labels" $ | indent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {{ .Values.apps.sentry.postfix.name | quote }}
+{{ include "deploy_utils.labels" $ | indent 6 }}
+ template:
+ metadata:
+ {{- if .Values.apps.sentry.postfix.harvest }}
+ annotations:
+ co.elastic.logs/enabled: "true"
+ metricbeat: "true"
+ {{- end }}
+ labels:
+ app: {{ .Values.apps.sentry.postfix.name | quote }}
+{{ include "deploy_utils.labels" $ | indent 8 }}
+ spec:
+ {{ if .Values.codefresh }}
+ imagePullSecrets:
+ - name: {{ .Values.codefresh.secret }}
+ {{- end }}
+ containers:
+ - name: {{ .Values.apps.sentry.postfix.name | default "sentry-postfix" | quote }}
+ image: {{ .Values.apps.sentry.postfix.image }}
+ imagePullPolicy: {{ include "deploy_utils.pullpolicy" .root }}
+ env:
+ - name: MTP_HOST
+ value: {{ printf "mail.%s" .Values.domain }}
+ {{- include "deploy_utils.env" .root | nindent 8 }}
+ {{- include "deploy_utils.privenv" .root | nindent 8 }}
+ ports:
+ - containerPort: {{ .Values.apps.sentry.postfix.port | default 25 }}
+ resources:
+ requests:
+ memory: "128Mi"
+ cpu: "100m"
+ limits:
+ memory: "512Mi"
+ cpu: "500m"
+---
diff --git a/applications/sentry/deploy/templates/postgres.yaml b/applications/sentry/deploy/templates/postgres.yaml
new file mode 100644
index 00000000..1d28f13d
--- /dev/null
+++ b/applications/sentry/deploy/templates/postgres.yaml
@@ -0,0 +1,77 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: sentry-db
+ labels:
+ app: {{ .Values.apps.sentry.postgres.name }}
+{{ include "deploy_utils.labels" $ | indent 4 }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 2Gi
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Values.apps.sentry.postgres.name }}
+ labels:
+ app: {{ .Values.apps.sentry.postgres.name }}
+{{ include "deploy_utils.labels" $ | indent 4 }}
+spec:
+ type: ClusterIP
+ ports:
+ - port: 5432
+ selector:
+ app: {{ .Values.apps.sentry.postgres.name }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ .Values.apps.sentry.postgres.name }}
+ labels:
+ app: {{ .Values.apps.sentry.postgres.name }}
+{{ include "deploy_utils.labels" $ | indent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {{ .Values.apps.sentry.postgres.name | quote }}
+{{ include "deploy_utils.labels" $ | indent 6 }}
+ template:
+ metadata:
+ labels:
+ app: {{ .Values.apps.sentry.postgres.name }}
+{{ include "deploy_utils.labels" $ | indent 8 }}
+ spec:
+ containers:
+ - name: {{ .Values.apps.sentry.postgres.name | default "sentry-postgres" | quote }}
+ image: {{ .Values.apps.sentry.postgres.image | quote }}
+ imagePullPolicy: "IfNotPresent"
+ env:
+ - name: POSTGRES_DB
+ value: {{ .Values.apps.sentry.postgres.initialdb | quote }}
+ - name: POSTGRES_USER
+ value: {{ .Values.apps.sentry.postgres.user | quote }}
+ - name: POSTGRES_PASSWORD
+ value: {{ .Values.apps.sentry.postgres.password | quote }}
+ - name: PGDATA
+ value: {{ .Values.apps.sentry.postgres.pgdata }}
+ ports:
+ - containerPort: {{ .Values.apps.sentry.postgres.port }}
+ resources:
+ requests:
+ memory: "128Mi"
+ cpu: "200m"
+ limits:
+ memory: "256Mi"
+ cpu: "500m"
+ volumeMounts:
+ - name: sentry-db
+ mountPath: {{ .Values.apps.sentry.postgres.datavolume }}
+ volumes:
+ - name: sentry-db
+ persistentVolumeClaim:
+ claimName: sentry-db
+---
\ No newline at end of file
diff --git a/applications/sentry/deploy/templates/redis.yaml b/applications/sentry/deploy/templates/redis.yaml
new file mode 100644
index 00000000..dac79e80
--- /dev/null
+++ b/applications/sentry/deploy/templates/redis.yaml
@@ -0,0 +1,52 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Values.apps.sentry.redis.name }}
+ labels:
+ app: {{ .Values.apps.sentry.redis.name }}
+{{ include "deploy_utils.labels" $ | indent 4 }}
+spec:
+ type: ClusterIP
+ ports:
+ - port: {{ .Values.apps.sentry.redis.port }}
+ selector:
+ app: {{ .Values.apps.sentry.redis.name }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ .Values.apps.sentry.redis.name }}
+ labels:
+ app: {{ .Values.apps.sentry.redis.name }}
+{{ include "deploy_utils.labels" $ | indent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {{ .Values.apps.sentry.redis.name | quote }}
+{{ include "deploy_utils.labels" $ | indent 6 }}
+ template:
+ metadata:
+ {{- if .Values.apps.sentry.redis.harvest }}
+ annotations:
+ co.elastic.logs/enabled: "true"
+ metricbeat: "true"
+ {{- end }}
+ labels:
+ app: {{ .Values.apps.sentry.redis.name }}
+{{ include "deploy_utils.labels" $ | indent 8 }}
+ spec:
+ containers:
+ - name: {{ .Values.apps.sentry.redis.name | default "sentry-redis" | quote }}
+ image: {{ .Values.apps.sentry.redis.image | quote }}
+ imagePullPolicy: "IfNotPresent"
+ ports:
+ - containerPort: {{ .Values.apps.sentry.redis.port }}
+ resources:
+ requests:
+ memory: "64Mi"
+ cpu: "100m"
+ limits:
+ memory: "128Mi"
+ cpu: "200m"
+---
\ No newline at end of file
diff --git a/applications/sentry/deploy/values.yaml b/applications/sentry/deploy/values.yaml
new file mode 100644
index 00000000..db917580
--- /dev/null
+++ b/applications/sentry/deploy/values.yaml
@@ -0,0 +1,39 @@
+harness:
+ subdomain: errormonitor
+ secured: false
+ service:
+ auto: true
+ port: 9000
+ name: sentry
+ deployment:
+ auto: true
+ name: sentry
+ port: 9000
+ resources:
+ requests:
+ memory: 256Mi
+ cpu: 300m
+ limits:
+ memory: 2048Mi
+ cpu: 2000m
+
+postgres:
+ name: sentry-postgres-host
+ port: 5432
+ image: postgres:latest
+ initialdb: sentry
+ user: sentry
+ password: secret
+ datavolume: /opt/data/
+ pgdata: /opt/data/pgdata
+
+redis:
+ name: sentry-redis-host
+ image: redis:latest
+ port: 6379
+
+postfix:
+ name: sentry-postfix-host
+ image: eeacms/postfix:latest
+ port: 25
+ hostname: mail.opensourcebrain.org
diff --git a/applications/sentry/sentryrunner.sh b/applications/sentry/sentryrunner.sh
new file mode 100644
index 00000000..37921f09
--- /dev/null
+++ b/applications/sentry/sentryrunner.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+echo "**** S:INI ****"
+export SENTRY_SECRET_KEY=$(sentry config generate-secret-key)
+export SENTRY_SERVER_EMAIL=${SENTRY_EMAIL_FROM}@${DOMAIN}
+
+# create / update database
+set -e
+
+sentry upgrade --noinput
+echo "**** E:INI ****"
+
+echo "**** S:CEL ****"
+# start celery
+sleep 10
+export C_FORCE_ROOT=1
+nohup sentry run cron > /var/log/sentrycron.log 2>&1 &
+nohup sentry run worker > /var/log/sentryworker.log 2>&1 &
+echo "**** E:CEL ****"
+
+echo "**** S:USR ****"
+# create superuser if not exists
+set +e
+sentry exec -c "
+from sentry.models import User
+try:
+ user=User.objects.all()[0]
+except IndexError:
+ # no user found
+ quit(1)
+quit(0)
+"
+userExists=$?
+set -e
+
+if [ $userExists -eq 1 ]; then
+sleep 15
+echo creating new user
+sentry createuser --email ${SENTRY_ADMIN_USER}@${DOMAIN} --password ${SENTRY_ADMIN_PASSWORD} --superuser --no-input
+fi
+echo "**** E:USR ****"
+
+echo "**** S:RUN ****"
+# run sentry
+sentry run web
+echo "**** E:RUN ****"
diff --git a/applications/volumemanager/README.md b/applications/volumemanager/README.md
new file mode 100644
index 00000000..3c6f5b85
--- /dev/null
+++ b/applications/volumemanager/README.md
@@ -0,0 +1,33 @@
+# Volume manager backend
+The volume manager is a pure microservice REST API.
+It is defined API-first with OpenAPI v3 and implemented as a Flask application.
+
+
+## Build / run
+
+```
+cd server
+
+# setup virtual env
+python3.7 -m venv venv
+
+# activate virtual env
+source venv/bin/activate
+
+# install dependencies
+pip install --no-cache-dir -r requirements.txt
+
+# run flask backend
+export FLASK_ENV=development
+python -m volumemanager
+```
+
+Open your browser at http://0.0.0.0:8080/api/ui/ to see the REST API UI.
+
+When running in CloudHarness, the API UI is available at https://volumemanager.cloudharness.metacell.us/api/ui/.
+
+## Tech
+
+The volume manager uses OpenAPI to define its REST API.
+
+The application is based on Flask.
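
A minimal sketch of how the volume manager API defined below could be called from a client, assuming the `requests` package and a valid JWT bearer token; the host and token are illustrative.

```python
import requests

# Illustrative values: adjust the host and supply a valid JWT.
BASE = "https://volumemanager.cloudharness.metacell.us/api"
HEADERS = {"Authorization": "Bearer <token>"}

# Create a Persistent Volume Claim (201 on success, 400 if it already exists).
created = requests.post(f"{BASE}/pvc", json={"name": "pvc-1", "size": "2Gi"}, headers=HEADERS)
print(created.status_code)

# Retrieve it back by name.
pvc = requests.get(f"{BASE}/pvc/pvc-1", headers=HEADERS).json()
print(pvc["namespace"], pvc["accessmode"], pvc["size"])
```
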
diff --git a/applications/volumemanager/api/config.json b/applications/volumemanager/api/config.json
new file mode 100644
index 00000000..e8f7d638
--- /dev/null
+++ b/applications/volumemanager/api/config.json
@@ -0,0 +1,3 @@
+{
+ "packageName": "volumemanager"
+}
\ No newline at end of file
diff --git a/applications/volumemanager/api/openapi.yaml b/applications/volumemanager/api/openapi.yaml
new file mode 100644
index 00000000..50fcc805
--- /dev/null
+++ b/applications/volumemanager/api/openapi.yaml
@@ -0,0 +1,113 @@
+openapi: 3.0.0
+info:
+ description: CloudHarness Volumes manager API
+ license:
+ name: UNLICENSED
+ title: Volumes manager API
+ version: 0.1.0
+servers:
+- description: SwaggerHub API Auto Mocking
+ url: /api
+tags:
+- description: rest API for Volumes manager
+ name: rest
+
+paths:
+ /pvc:
+ post:
+ operationId: pvc_post
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PersistentVolumeClaimCreate'
+ description: The Persistent Volume Claim to create.
+ required: true
+ responses:
+ "201":
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PersistentVolumeClaim'
+ description: Save successful.
+ "400":
+ description: The Persistent Volume Claim already exists.
+ security:
+ - bearerAuth: []
+ summary: Create a Persistent Volume Claim in Kubernetes
+ tags:
+ - rest
+ x-openapi-router-controller: volumemanager.controllers.rest_controller
+ /pvc/{name}:
+ get:
+ operationId: pvc_name_get
+ parameters:
+ - description: The name of the Persistent Volume Claim to be retrieved
+ explode: false
+ in: path
+ name: name
+ required: true
+ schema:
+ type: string
+ style: simple
+ responses:
+ "200":
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PersistentVolumeClaim'
+ description: The Persistent Volume Claim.
+ "404":
+ description: The Persistent Volume Claim was not found.
+ security:
+ - bearerAuth: []
+ summary: Retrieve a Persistent Volume Claim from the Kubernetes repository.
+ tags:
+ - rest
+ x-openapi-router-controller: volumemanager.controllers.rest_controller
+components:
+ schemas:
+ PersistentVolumeClaimCreate:
+ example:
+ size: 2Gi (see also https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/resources.md#resource-quantities)
+ name: pvc-1
+ properties:
+ name:
+        description: Unique name for the Persistent Volume Claim to create.
+ example: pvc-1
+ type: string
+ size:
+ description: The size of the Persistent Volume Claim to create.
+ example: 2Gi (see also https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/resources.md#resource-quantities)
+ type: string
+ type: object
+ PersistentVolumeClaim:
+ example:
+ size: 2Gi (see also https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/resources.md#resource-quantities)
+ name: pvc-1
+ namespace: ch
+ accessmode: ReadWriteMany
+ properties:
+ name:
+        description: Unique name for the Persistent Volume Claim
+ example: pvc-1
+ type: string
+ namespace:
+        description: The namespace where the Persistent Volume Claim resides
+ example: ch
+ type: string
+ accessmode:
+ description: The accessmode of the Persistent Volume Claim
+ example: ReadWriteMany
+ type: string
+ size:
+ description: The size of the Persistent Volume Claim.
+ example: 2Gi (see also https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/resources.md#resource-quantities)
+ type: string
+ type: object
+ securitySchemes:
+ bearerAuth:
+ bearerFormat: JWT
+ scheme: bearer
+ type: http
+ x-bearerInfoFunc: cloudharness.auth.decode_token
diff --git a/applications/volumemanager/deploy/values.yaml b/applications/volumemanager/deploy/values.yaml
new file mode 100644
index 00000000..8557ce18
--- /dev/null
+++ b/applications/volumemanager/deploy/values.yaml
@@ -0,0 +1,12 @@
+harness:
+ name: volumemanager
+ subdomain: volumemanager
+ service:
+ port: 8080
+ auto: true
+ name: volumemanager
+ secured: false
+ sentry: true
+ deployment:
+ auto: true
+ port: 8080
diff --git a/applications/workflows/src/.dockerignore b/applications/volumemanager/server/.dockerignore
similarity index 100%
rename from applications/workflows/src/.dockerignore
rename to applications/volumemanager/server/.dockerignore
diff --git a/applications/workflows/src/.gitignore b/applications/volumemanager/server/.gitignore
similarity index 100%
rename from applications/workflows/src/.gitignore
rename to applications/volumemanager/server/.gitignore
diff --git a/applications/samples/src/.openapi-generator-ignore b/applications/volumemanager/server/.openapi-generator-ignore
similarity index 95%
rename from applications/samples/src/.openapi-generator-ignore
rename to applications/volumemanager/server/.openapi-generator-ignore
index 5cc13cc6..7484ee59 100644
--- a/applications/samples/src/.openapi-generator-ignore
+++ b/applications/volumemanager/server/.openapi-generator-ignore
@@ -21,7 +21,3 @@
#docs/*.md
# Then explicitly reverse the ignore rule for a single file:
#!docs/README.md
-setup.py
-*/controllers/*
-*/models/*
-Dockerfile
\ No newline at end of file
diff --git a/applications/workflows/src/.travis.yml b/applications/volumemanager/server/.travis.yml
similarity index 100%
rename from applications/workflows/src/.travis.yml
rename to applications/volumemanager/server/.travis.yml
diff --git a/applications/samples/src/Dockerfile b/applications/volumemanager/server/Dockerfile
similarity index 81%
rename from applications/samples/src/Dockerfile
rename to applications/volumemanager/server/Dockerfile
index d74b98f7..4a3039e2 100644
--- a/applications/samples/src/Dockerfile
+++ b/applications/volumemanager/server/Dockerfile
@@ -2,6 +2,9 @@ ARG REGISTRY
ARG TAG=latest
FROM ${REGISTRY}cloudharness-base:${TAG}
+RUN apk update
+RUN apk upgrade
+
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app
@@ -15,4 +18,4 @@ EXPOSE 8080
ENTRYPOINT ["python3"]
-CMD ["-m", "api_samples"]
\ No newline at end of file
+CMD ["-m", "volumemanager"]
\ No newline at end of file
diff --git a/applications/samples/src/README.md b/applications/volumemanager/server/README.md
similarity index 91%
rename from applications/samples/src/README.md
rename to applications/volumemanager/server/README.md
index b1e6a1f9..c102460f 100644
--- a/applications/samples/src/README.md
+++ b/applications/volumemanager/server/README.md
@@ -15,7 +15,7 @@ To run the server, please execute the following from the root directory:
```
pip3 install -r requirements.txt
-python3 -m api_samples
+python3 -m volumemanager
```
and open your browser to here:
@@ -42,8 +42,8 @@ To run the server on a Docker container, please execute the following from the r
```bash
# building the image
-docker build -t api_samples .
+docker build -t volumemanager .
# starting up a container
-docker run -p 8080:8080 api_samples
+docker run -p 8080:8080 volumemanager
```
\ No newline at end of file
diff --git a/applications/workflows/src/git_push.sh b/applications/volumemanager/server/git_push.sh
similarity index 100%
rename from applications/workflows/src/git_push.sh
rename to applications/volumemanager/server/git_push.sh
diff --git a/applications/volumemanager/server/requirements.txt b/applications/volumemanager/server/requirements.txt
new file mode 100644
index 00000000..72ed547c
--- /dev/null
+++ b/applications/volumemanager/server/requirements.txt
@@ -0,0 +1,10 @@
+connexion[swagger-ui] >= 2.6.0; python_version>="3.6"
+# 2.3 is the last version that supports python 3.4-3.5
+connexion[swagger-ui] <= 2.3.0; python_version=="3.5" or python_version=="3.4"
+# connexion requires werkzeug but connexion < 2.4.0 does not install werkzeug
+# we must peg werkzeug versions below to fix connexion
+# https://github.com/zalando/connexion/pull/1044
+werkzeug == 0.16.1; python_version=="3.5" or python_version=="3.4"
+swagger-ui-bundle >= 0.0.2
+python_dateutil >= 2.6.0
+setuptools >= 21.0.0
diff --git a/applications/workflows/src/setup.py b/applications/volumemanager/server/setup.py
similarity index 71%
rename from applications/workflows/src/setup.py
rename to applications/volumemanager/server/setup.py
index 9bd6a02e..efe2a815 100644
--- a/applications/workflows/src/setup.py
+++ b/applications/volumemanager/server/setup.py
@@ -3,7 +3,7 @@
import sys
from setuptools import setup, find_packages
-NAME = "workflows_api"
+NAME = "volumemanager"
VERSION = "1.0.0"
# To install the library, run the following
@@ -22,18 +22,18 @@
setup(
name=NAME,
version=VERSION,
- description="Workflows API",
- author_email="cloudharness@metacell.us",
+ description="Volumes manager API",
+ author_email="",
url="",
- keywords=["OpenAPI", "Workflows API"],
+ keywords=["OpenAPI", "Volumes manager API"],
install_requires=REQUIRES,
packages=find_packages(),
package_data={'': ['openapi/openapi.yaml']},
include_package_data=True,
entry_points={
- 'console_scripts': ['workflows_api=workflows_api.__main__:main']},
+ 'console_scripts': ['volumemanager=volumemanager.__main__:main']},
long_description="""\
- Workflows API
+ CloudHarness Volumes manager API
"""
)
diff --git a/applications/samples/src/test-requirements.txt b/applications/volumemanager/server/test-requirements.txt
similarity index 84%
rename from applications/samples/src/test-requirements.txt
rename to applications/volumemanager/server/test-requirements.txt
index a2626d87..0970f28c 100644
--- a/applications/samples/src/test-requirements.txt
+++ b/applications/volumemanager/server/test-requirements.txt
@@ -1,4 +1,4 @@
pytest~=4.6.7 # needed for python 2.7+3.4
pytest-cov>=2.8.1
pytest-randomly==1.2.3 # needed for python 2.7+3.4
-flask_testing==0.6.1
\ No newline at end of file
+Flask-Testing==0.8.0
diff --git a/applications/workflows/src/tox.ini b/applications/volumemanager/server/tox.ini
similarity index 66%
rename from applications/workflows/src/tox.ini
rename to applications/volumemanager/server/tox.ini
index e6dce35b..cf2d20a3 100644
--- a/applications/workflows/src/tox.ini
+++ b/applications/volumemanager/server/tox.ini
@@ -1,9 +1,11 @@
[tox]
envlist = py3
+skipsdist=True
[testenv]
deps=-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
+ {toxinidir}
commands=
- pytest --cov=workflows_api
\ No newline at end of file
+ pytest --cov=volumemanager
diff --git a/applications/workflows/src/workflows_api/controllers/__init__.py b/applications/volumemanager/server/volumemanager/__init__.py
similarity index 100%
rename from applications/workflows/src/workflows_api/controllers/__init__.py
rename to applications/volumemanager/server/volumemanager/__init__.py
diff --git a/applications/workflows/src/workflows_api/__main__.py b/applications/volumemanager/server/volumemanager/__main__.py
similarity index 76%
rename from applications/workflows/src/workflows_api/__main__.py
rename to applications/volumemanager/server/volumemanager/__main__.py
index 40782233..866d5627 100644
--- a/applications/workflows/src/workflows_api/__main__.py
+++ b/applications/volumemanager/server/volumemanager/__main__.py
@@ -2,14 +2,14 @@
import connexion
-from workflows_api import encoder
+from volumemanager import encoder
def main():
app = connexion.App(__name__, specification_dir='./openapi/')
app.app.json_encoder = encoder.JSONEncoder
app.add_api('openapi.yaml',
- arguments={'title': 'Workflows API'},
+ arguments={'title': 'Volumes manager API'},
pythonic_params=True)
app.run(port=8080)
diff --git a/applications/workflows/src/workflows_api/service/__init__.py b/applications/volumemanager/server/volumemanager/controllers/__init__.py
similarity index 100%
rename from applications/workflows/src/workflows_api/service/__init__.py
rename to applications/volumemanager/server/volumemanager/controllers/__init__.py
diff --git a/applications/volumemanager/server/volumemanager/controllers/rest_controller.py b/applications/volumemanager/server/volumemanager/controllers/rest_controller.py
new file mode 100644
index 00000000..879fb743
--- /dev/null
+++ b/applications/volumemanager/server/volumemanager/controllers/rest_controller.py
@@ -0,0 +1,50 @@
+import connexion
+import six
+import flask
+
+from cloudharness.service.pvc import create_persistent_volume_claim, get_persistent_volume_claim
+
+from volumemanager.models.persistent_volume_claim import PersistentVolumeClaim # noqa: E501
+from volumemanager.models.persistent_volume_claim_create import PersistentVolumeClaimCreate # noqa: E501
+from volumemanager import util
+
+def pvc_name_get(name): # noqa: E501
+ """Used to retrieve a Persistent Volume Claim from the Kubernetes repository.
+
+ # noqa: E501
+
+ :param name: The name of the Persistent Volume Claim to be retrieved
+ :type name: str
+
+ :rtype: PersistentVolumeClaim
+ """
+ pvc = get_persistent_volume_claim(name)
+ if not pvc:
+ return f"Persistent Volume Claim with name {name} not found.", 404
+
+ pvc = PersistentVolumeClaim(
+ name=pvc.metadata.name,
+ namespace=pvc.metadata.namespace,
+ accessmode=pvc.status.access_modes[0],
+ size=pvc.status.capacity.get('storage','')
+ )
+ return pvc
+
+
+def pvc_post(): # noqa: E501
+ """Used to create a Persistent Volume Claim in Kubernetes
+
+ # noqa: E501
+
+ :param persistent_volume_claim_create: The Persistent Volume Claim to create.
+ :type persistent_volume_claim_create: dict | bytes
+
+ :rtype: PersistentVolumeClaim
+ """
+ if connexion.request.is_json:
+ persistent_volume_claim_create = PersistentVolumeClaimCreate.from_dict(connexion.request.get_json()) # noqa: E501
+ create_persistent_volume_claim(
+ name=persistent_volume_claim_create.name,
+ size=persistent_volume_claim_create.size,
+ logger=flask.current_app.logger)
+ return 'Saved!'
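
The controller above delegates to the `cloudharness.service.pvc` helpers; here is a minimal sketch of using those same calls directly from any CloudHarness Python application, assuming it runs with a working in-cluster or kubectl configuration.

```python
import logging

from cloudharness.service.pvc import (
    create_persistent_volume_claim,
    get_persistent_volume_claim,
)

logger = logging.getLogger(__name__)

# Create a claim (the same call the controller issues for POST /pvc).
create_persistent_volume_claim(name="pvc-1", size="2Gi", logger=logger)

# Read it back; the returned object is the Kubernetes PVC resource.
pvc = get_persistent_volume_claim("pvc-1")
if pvc:
    print(pvc.metadata.name, pvc.status.capacity.get("storage", ""))
```
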
diff --git a/applications/samples/server/api_samples/controllers/security_controller_.py b/applications/volumemanager/server/volumemanager/controllers/security_controller_.py
similarity index 100%
rename from applications/samples/server/api_samples/controllers/security_controller_.py
rename to applications/volumemanager/server/volumemanager/controllers/security_controller_.py
diff --git a/applications/workflows/src/workflows_api/encoder.py b/applications/volumemanager/server/volumemanager/encoder.py
similarity index 91%
rename from applications/workflows/src/workflows_api/encoder.py
rename to applications/volumemanager/server/volumemanager/encoder.py
index ffc6e492..50e16388 100644
--- a/applications/workflows/src/workflows_api/encoder.py
+++ b/applications/volumemanager/server/volumemanager/encoder.py
@@ -1,7 +1,7 @@
from connexion.apps.flask_app import FlaskJSONEncoder
import six
-from workflows_api.models.base_model_ import Model
+from volumemanager.models.base_model_ import Model
class JSONEncoder(FlaskJSONEncoder):
diff --git a/applications/volumemanager/server/volumemanager/models/__init__.py b/applications/volumemanager/server/volumemanager/models/__init__.py
new file mode 100644
index 00000000..48dcdcbc
--- /dev/null
+++ b/applications/volumemanager/server/volumemanager/models/__init__.py
@@ -0,0 +1,7 @@
+# coding: utf-8
+
+# flake8: noqa
+from __future__ import absolute_import
+# import models into model package
+from volumemanager.models.persistent_volume_claim import PersistentVolumeClaim
+from volumemanager.models.persistent_volume_claim_create import PersistentVolumeClaimCreate
diff --git a/applications/workflows/src/workflows_api/models/base_model_.py b/applications/volumemanager/server/volumemanager/models/base_model_.py
similarity index 98%
rename from applications/workflows/src/workflows_api/models/base_model_.py
rename to applications/volumemanager/server/volumemanager/models/base_model_.py
index d532ae7b..ea49046e 100644
--- a/applications/workflows/src/workflows_api/models/base_model_.py
+++ b/applications/volumemanager/server/volumemanager/models/base_model_.py
@@ -3,7 +3,7 @@
import six
import typing
-from workflows_api import util
+from volumemanager import util
T = typing.TypeVar('T')
diff --git a/applications/volumemanager/server/volumemanager/models/persistent_volume_claim.py b/applications/volumemanager/server/volumemanager/models/persistent_volume_claim.py
new file mode 100644
index 00000000..36664245
--- /dev/null
+++ b/applications/volumemanager/server/volumemanager/models/persistent_volume_claim.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from datetime import date, datetime # noqa: F401
+
+from typing import List, Dict # noqa: F401
+
+from volumemanager.models.base_model_ import Model
+from volumemanager import util
+
+
+class PersistentVolumeClaim(Model):
+ """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, name=None, namespace=None, accessmode=None, size=None): # noqa: E501
+ """PersistentVolumeClaim - a model defined in OpenAPI
+
+ :param name: The name of this PersistentVolumeClaim. # noqa: E501
+ :type name: str
+ :param namespace: The namespace of this PersistentVolumeClaim. # noqa: E501
+ :type namespace: str
+ :param accessmode: The accessmode of this PersistentVolumeClaim. # noqa: E501
+ :type accessmode: str
+ :param size: The size of this PersistentVolumeClaim. # noqa: E501
+ :type size: str
+ """
+ self.openapi_types = {
+ 'name': str,
+ 'namespace': str,
+ 'accessmode': str,
+ 'size': str
+ }
+
+ self.attribute_map = {
+ 'name': 'name',
+ 'namespace': 'namespace',
+ 'accessmode': 'accessmode',
+ 'size': 'size'
+ }
+
+ self._name = name
+ self._namespace = namespace
+ self._accessmode = accessmode
+ self._size = size
+
+ @classmethod
+ def from_dict(cls, dikt) -> 'PersistentVolumeClaim':
+ """Returns the dict as a model
+
+ :param dikt: A dict.
+ :type: dict
+ :return: The PersistentVolumeClaim of this PersistentVolumeClaim. # noqa: E501
+ :rtype: PersistentVolumeClaim
+ """
+ return util.deserialize_model(dikt, cls)
+
+ @property
+ def name(self):
+ """Gets the name of this PersistentVolumeClaim.
+
+        Unique name for the Persistent Volume Claim # noqa: E501
+
+ :return: The name of this PersistentVolumeClaim.
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this PersistentVolumeClaim.
+
+        Unique name for the Persistent Volume Claim # noqa: E501
+
+ :param name: The name of this PersistentVolumeClaim.
+ :type name: str
+ """
+
+ self._name = name
+
+ @property
+ def namespace(self):
+ """Gets the namespace of this PersistentVolumeClaim.
+
+        The namespace where the Persistent Volume Claim resides # noqa: E501
+
+ :return: The namespace of this PersistentVolumeClaim.
+ :rtype: str
+ """
+ return self._namespace
+
+ @namespace.setter
+ def namespace(self, namespace):
+ """Sets the namespace of this PersistentVolumeClaim.
+
+        The namespace where the Persistent Volume Claim resides # noqa: E501
+
+ :param namespace: The namespace of this PersistentVolumeClaim.
+ :type namespace: str
+ """
+
+ self._namespace = namespace
+
+ @property
+ def accessmode(self):
+ """Gets the accessmode of this PersistentVolumeClaim.
+
+ The accessmode of the Persistent Volume Claim # noqa: E501
+
+ :return: The accessmode of this PersistentVolumeClaim.
+ :rtype: str
+ """
+ return self._accessmode
+
+ @accessmode.setter
+ def accessmode(self, accessmode):
+ """Sets the accessmode of this PersistentVolumeClaim.
+
+ The accessmode of the Persistent Volume Claim # noqa: E501
+
+ :param accessmode: The accessmode of this PersistentVolumeClaim.
+ :type accessmode: str
+ """
+
+ self._accessmode = accessmode
+
+ @property
+ def size(self):
+ """Gets the size of this PersistentVolumeClaim.
+
+ The size of the Persistent Volume Claim. # noqa: E501
+
+ :return: The size of this PersistentVolumeClaim.
+ :rtype: str
+ """
+ return self._size
+
+ @size.setter
+ def size(self, size):
+ """Sets the size of this PersistentVolumeClaim.
+
+ The size of the Persistent Volume Claim. # noqa: E501
+
+ :param size: The size of this PersistentVolumeClaim.
+ :type size: str
+ """
+
+ self._size = size
diff --git a/applications/volumemanager/server/volumemanager/models/persistent_volume_claim_create.py b/applications/volumemanager/server/volumemanager/models/persistent_volume_claim_create.py
new file mode 100644
index 00000000..dd84f5ee
--- /dev/null
+++ b/applications/volumemanager/server/volumemanager/models/persistent_volume_claim_create.py
@@ -0,0 +1,94 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from datetime import date, datetime # noqa: F401
+
+from typing import List, Dict # noqa: F401
+
+from volumemanager.models.base_model_ import Model
+from volumemanager import util
+
+
+class PersistentVolumeClaimCreate(Model):
+ """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, name=None, size=None): # noqa: E501
+ """PersistentVolumeClaimCreate - a model defined in OpenAPI
+
+ :param name: The name of this PersistentVolumeClaimCreate. # noqa: E501
+ :type name: str
+ :param size: The size of this PersistentVolumeClaimCreate. # noqa: E501
+ :type size: str
+ """
+ self.openapi_types = {
+ 'name': str,
+ 'size': str
+ }
+
+ self.attribute_map = {
+ 'name': 'name',
+ 'size': 'size'
+ }
+
+ self._name = name
+ self._size = size
+
+ @classmethod
+ def from_dict(cls, dikt) -> 'PersistentVolumeClaimCreate':
+ """Returns the dict as a model
+
+ :param dikt: A dict.
+ :type: dict
+ :return: The PersistentVolumeClaimCreate of this PersistentVolumeClaimCreate. # noqa: E501
+ :rtype: PersistentVolumeClaimCreate
+ """
+ return util.deserialize_model(dikt, cls)
+
+ @property
+ def name(self):
+ """Gets the name of this PersistentVolumeClaimCreate.
+
+        Unique name for the Persistent Volume Claim to create. # noqa: E501
+
+ :return: The name of this PersistentVolumeClaimCreate.
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this PersistentVolumeClaimCreate.
+
+        Unique name for the Persistent Volume Claim to create. # noqa: E501
+
+ :param name: The name of this PersistentVolumeClaimCreate.
+ :type name: str
+ """
+
+ self._name = name
+
+ @property
+ def size(self):
+ """Gets the size of this PersistentVolumeClaimCreate.
+
+ The size of the Persistent Volume Claim to create. # noqa: E501
+
+ :return: The size of this PersistentVolumeClaimCreate.
+ :rtype: str
+ """
+ return self._size
+
+ @size.setter
+ def size(self, size):
+ """Sets the size of this PersistentVolumeClaimCreate.
+
+ The size of the Persistent Volume Claim to create. # noqa: E501
+
+ :param size: The size of this PersistentVolumeClaimCreate.
+ :type size: str
+ """
+
+ self._size = size
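
For orientation, a minimal sketch of how the generated model above can be used; the values are illustrative placeholders, not defaults:

```python
from volumemanager.models.persistent_volume_claim_create import PersistentVolumeClaimCreate

# Build the request body for POST /pvc (example values only)
pvc_create = PersistentVolumeClaimCreate(name="pvc-1", size="2Gi")
print(pvc_create.name, pvc_create.size)  # -> pvc-1 2Gi
```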
diff --git a/applications/volumemanager/server/volumemanager/openapi/openapi.yaml b/applications/volumemanager/server/volumemanager/openapi/openapi.yaml
new file mode 100644
index 00000000..d8ea2cc4
--- /dev/null
+++ b/applications/volumemanager/server/volumemanager/openapi/openapi.yaml
@@ -0,0 +1,112 @@
+openapi: 3.0.0
+info:
+ description: CloudHarness Volumes manager API
+ license:
+ name: UNLICENSED
+ title: Volumes manager API
+ version: 0.1.0
+servers:
+- description: SwaggerHub API Auto Mocking
+ url: /api
+tags:
+- description: rest API for Volumes manager
+ name: rest
+paths:
+ /pvc:
+ post:
+ operationId: pvc_post
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PersistentVolumeClaimCreate'
+ description: The Persistent Volume Claim to create.
+ required: true
+ responses:
+ "201":
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PersistentVolumeClaim'
+ description: Save successful.
+ "400":
+ description: The Persistent Volume Claim already exists.
+ security:
+ - bearerAuth: []
+ summary: Create a Persistent Volume Claim in Kubernetes
+ tags:
+ - rest
+ x-openapi-router-controller: volumemanager.controllers.rest_controller
+ /pvc/{name}:
+ get:
+ operationId: pvc_name_get
+ parameters:
+ - description: The name of the Persistent Volume Claim to be retrieved
+ explode: false
+ in: path
+ name: name
+ required: true
+ schema:
+ type: string
+ style: simple
+ responses:
+ "200":
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/PersistentVolumeClaim'
+ description: The Persistent Volume Claim.
+ "404":
+ description: The Persistent Volume Claim was not found.
+ security:
+ - bearerAuth: []
+ summary: Retrieve a Persistent Volume Claim from the Kubernetes repository.
+ tags:
+ - rest
+ x-openapi-router-controller: volumemanager.controllers.rest_controller
+components:
+ schemas:
+ PersistentVolumeClaimCreate:
+ example:
+ size: 2Gi (see also https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/resources.md#resource-quantities)
+ name: pvc-1
+ properties:
+ name:
+          description: Unique name for the Persistent Volume Claim to create.
+ example: pvc-1
+ type: string
+ size:
+ description: The size of the Persistent Volume Claim to create.
+ example: 2Gi (see also https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/resources.md#resource-quantities)
+ type: string
+ type: object
+ PersistentVolumeClaim:
+ example:
+ size: 2Gi (see also https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/resources.md#resource-quantities)
+ name: pvc-1
+ namespace: ch
+ accessmode: ReadWriteMany
+ properties:
+ name:
+          description: Unique name for the Persistent Volume Claim
+ example: pvc-1
+ type: string
+ namespace:
+          description: The namespace where the Persistent Volume Claim resides
+ example: ch
+ type: string
+ accessmode:
+ description: The accessmode of the Persistent Volume Claim
+ example: ReadWriteMany
+ type: string
+ size:
+ description: The size of the Persistent Volume Claim.
+ example: 2Gi (see also https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/resources.md#resource-quantities)
+ type: string
+ type: object
+ securitySchemes:
+ bearerAuth:
+ bearerFormat: JWT
+ scheme: bearer
+ type: http
+ x-bearerInfoFunc: cloudharness.auth.decode_token
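
Going by the specification above, creating a claim boils down to an authenticated POST to `/pvc`; a minimal sketch using `requests`, where the host and bearer token are placeholders and not part of the spec:

```python
import requests

# POST /pvc with a PersistentVolumeClaimCreate body (host and token are hypothetical)
response = requests.post(
    "http://localhost/api/pvc",
    json={"name": "pvc-1", "size": "2Gi"},
    headers={"Authorization": "Bearer <token>"},
)
print(response.status_code)  # 201 on success, 400 if the claim already exists
```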
diff --git a/applications/workflows/src/workflows_api/test/__init__.py b/applications/volumemanager/server/volumemanager/test/__init__.py
similarity index 89%
rename from applications/workflows/src/workflows_api/test/__init__.py
rename to applications/volumemanager/server/volumemanager/test/__init__.py
index 1f7852ce..41ed7a62 100644
--- a/applications/workflows/src/workflows_api/test/__init__.py
+++ b/applications/volumemanager/server/volumemanager/test/__init__.py
@@ -3,7 +3,7 @@
import connexion
from flask_testing import TestCase
-from workflows_api.encoder import JSONEncoder
+from volumemanager.encoder import JSONEncoder
class BaseTestCase(TestCase):
diff --git a/applications/volumemanager/server/volumemanager/test/test_rest_controller.py b/applications/volumemanager/server/volumemanager/test/test_rest_controller.py
new file mode 100644
index 00000000..d62a1dbf
--- /dev/null
+++ b/applications/volumemanager/server/volumemanager/test/test_rest_controller.py
@@ -0,0 +1,58 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+import unittest
+
+from flask import json
+from six import BytesIO
+
+from volumemanager.models.persistent_volume_claim import PersistentVolumeClaim # noqa: E501
+from volumemanager.models.persistent_volume_claim_create import PersistentVolumeClaimCreate # noqa: E501
+from volumemanager.test import BaseTestCase
+
+
+class TestRestController(BaseTestCase):
+ """RestController integration test stubs"""
+
+ def test_pvc_name_get(self):
+ """Test case for pvc_name_get
+
+ Used to retrieve a Persistent Volume Claim from the Kubernetes repository.
+ """
+ headers = {
+ 'Accept': 'application/json',
+ 'Authorization': 'Bearer special-key',
+ }
+ response = self.client.open(
+ '/api/pvc/{name}'.format(name='name_example'),
+ method='GET',
+ headers=headers)
+ self.assert200(response,
+ 'Response body is : ' + response.data.decode('utf-8'))
+
+ def test_pvc_post(self):
+ """Test case for pvc_post
+
+ Used to create a Persistent Volume Claim in Kubernetes
+ """
+ persistent_volume_claim_create = {
+ "size" : "2Gi (see also https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/resources.md#resource-quantities)",
+ "name" : "pvc-1"
+}
+ headers = {
+ 'Accept': 'application/json',
+ 'Content-Type': 'application/json',
+ 'Authorization': 'Bearer special-key',
+ }
+ response = self.client.open(
+ '/api/pvc',
+ method='POST',
+ headers=headers,
+ data=json.dumps(persistent_volume_claim_create),
+ content_type='application/json')
+ self.assert200(response,
+ 'Response body is : ' + response.data.decode('utf-8'))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/applications/workflows/src/workflows_api/typing_utils.py b/applications/volumemanager/server/volumemanager/typing_utils.py
similarity index 100%
rename from applications/workflows/src/workflows_api/typing_utils.py
rename to applications/volumemanager/server/volumemanager/typing_utils.py
diff --git a/applications/workflows/src/workflows_api/util.py b/applications/volumemanager/server/volumemanager/util.py
similarity index 98%
rename from applications/workflows/src/workflows_api/util.py
rename to applications/volumemanager/server/volumemanager/util.py
index 4c21578d..50cc91eb 100644
--- a/applications/workflows/src/workflows_api/util.py
+++ b/applications/volumemanager/server/volumemanager/util.py
@@ -2,7 +2,7 @@
import six
import typing
-from workflows_api import typing_utils
+from volumemanager import typing_utils
def _deserialize(data, klass):
diff --git a/applications/workflows/deploy/values.yaml b/applications/workflows/deploy/values.yaml
index 232fe98f..a53fb064 100644
--- a/applications/workflows/deploy/values.yaml
+++ b/applications/workflows/deploy/values.yaml
@@ -1,6 +1,9 @@
-harvest: false
-port: 8080
-subdomain: workflows
-autoservice: true
-autodeploy: true
-serviceaccount: argo-workflows
\ No newline at end of file
+harness:
+ subdomain: workflows
+ secured: false
+ service:
+ port: 8080
+ auto: true
+ deployment:
+ auto: true
+ port: 8080
\ No newline at end of file
diff --git a/applications/workflows/server/.openapi-generator-ignore b/applications/workflows/server/.openapi-generator-ignore
index 7484ee59..b09fd633 100644
--- a/applications/workflows/server/.openapi-generator-ignore
+++ b/applications/workflows/server/.openapi-generator-ignore
@@ -21,3 +21,7 @@
#docs/*.md
# Then explicitly reverse the ignore rule for a single file:
#!docs/README.md
+
+Dockerfile
+*/controllers/*
+*/models/*
\ No newline at end of file
diff --git a/applications/workflows/server/Dockerfile b/applications/workflows/server/Dockerfile
index 8c6ebffd..971916ec 100644
--- a/applications/workflows/server/Dockerfile
+++ b/applications/workflows/server/Dockerfile
@@ -1,4 +1,6 @@
-FROM python:3-alpine
+ARG REGISTRY
+ARG TAG=latest
+FROM ${REGISTRY}cloudharness-base:${TAG}
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app
diff --git a/applications/workflows/server/__init__.py b/applications/workflows/server/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/applications/workflows/server/workflows_api/controllers/create_and_access_controller.py b/applications/workflows/server/workflows_api/controllers/create_and_access_controller.py
index f7226c97..c2f1f5d5 100644
--- a/applications/workflows/server/workflows_api/controllers/create_and_access_controller.py
+++ b/applications/workflows/server/workflows_api/controllers/create_and_access_controller.py
@@ -4,8 +4,14 @@
from workflows_api.models.operation import Operation # noqa: E501
from workflows_api.models.operation_search_result import OperationSearchResult # noqa: E501
from workflows_api.models.operation_status import OperationStatus # noqa: E501
+from workflows_api.models.search_result_data import SearchResultData # noqa: E501
from workflows_api import util
+from workflows_api.service import workflow_service
+from workflows_api.service.workflow_service import OperationNotFound, OperationException, BadParam
+
+from cloudharness import log
+
def delete_operation(name): # noqa: E501
"""deletes operation by name
@@ -17,7 +23,13 @@ def delete_operation(name): # noqa: E501
:rtype: None
"""
- return 'do some magic!'
+ try:
+ workflow_service.delete_operation(name)
+ except OperationNotFound as e:
+ return (f'{name} not found', 404)
+ except OperationException as e:
+ log.error(f'Unhandled remote exception while deleting workflow {name}', exc_info=e)
+        return 'Unexpected error', e.status
def get_operation(name): # noqa: E501
@@ -30,7 +42,13 @@ def get_operation(name): # noqa: E501
:rtype: List[Operation]
"""
- return 'do some magic!'
+ try:
+ return workflow_service.get_operation(name)
+ except OperationNotFound as e:
+ return (f'{name} not found', 404)
+ except OperationException as e:
+ log.error(f'Unhandled remote exception while retrieving workflow {name}', exc_info=e)
+        return 'Unexpected error', e.status
def list_operations(status=None, previous_search_token=None, limit=None): # noqa: E501
@@ -40,17 +58,20 @@ def list_operations(status=None, previous_search_token=None, limit=None): # noq
:param status: filter by status
:type status: dict | bytes
- :param previous_search_token: continue previous search (pagination chunks)
- :type previous_search_token: str
+ :param previous_search: continue previous search (pagination chunks)
+ :type previous_search: dict | bytes
:param limit: maximum number of records to return per page
:type limit: int
:rtype: OperationSearchResult
"""
- if connexion.request.is_json:
- status = OperationStatus.from_dict(connexion.request.get_json()) # noqa: E501
- return 'do some magic!'
-
+ try:
+ return workflow_service.list_operations(status, continue_token=previous_search_token, limit=limit)
+ except BadParam as e:
+ return (f'Bad parameter: {e.param}, {e}', e.status)
+ except OperationException as e:
+        log.error('Unhandled remote exception while retrieving workflows', exc_info=e)
+ return '', e.status
def log_operation(name): # noqa: E501
"""get operation by name
@@ -62,4 +83,8 @@ def log_operation(name): # noqa: E501
:rtype: str
"""
- return 'do some magic!'
+ if not name or name == '':
+ return ''
+
+ return workflow_service.log_operation(name)
+
diff --git a/applications/workflows/server/workflows_api/models/operation.py b/applications/workflows/server/workflows_api/models/operation.py
index c4d7ab75..66110437 100644
--- a/applications/workflows/server/workflows_api/models/operation.py
+++ b/applications/workflows/server/workflows_api/models/operation.py
@@ -6,10 +6,8 @@
from typing import List, Dict # noqa: F401
from workflows_api.models.base_model_ import Model
-from workflows_api.models.operation_status import OperationStatus
from workflows_api import util
-
-from workflows_api.models.operation_status import OperationStatus # noqa: E501
+from .operation_status import OperationStatus
class Operation(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
diff --git a/applications/workflows/server/workflows_api/models/operation_search_result.py b/applications/workflows/server/workflows_api/models/operation_search_result.py
index d51b1b9b..7c5f9770 100644
--- a/applications/workflows/server/workflows_api/models/operation_search_result.py
+++ b/applications/workflows/server/workflows_api/models/operation_search_result.py
@@ -6,12 +6,9 @@
from typing import List, Dict # noqa: F401
from workflows_api.models.base_model_ import Model
-from workflows_api.models.operation import Operation
-from workflows_api.models.search_result_data import SearchResultData
from workflows_api import util
-
-from workflows_api.models.operation import Operation # noqa: E501
-from workflows_api.models.search_result_data import SearchResultData # noqa: E501
+from workflows_api.models.search_result_data import SearchResultData
+from workflows_api.models.operation import Operation
class OperationSearchResult(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
diff --git a/applications/workflows/server/workflows_api/models/operation_status.py b/applications/workflows/server/workflows_api/models/operation_status.py
index e4492c7d..1b2ba6c7 100644
--- a/applications/workflows/server/workflows_api/models/operation_status.py
+++ b/applications/workflows/server/workflows_api/models/operation_status.py
@@ -24,6 +24,7 @@ class OperationStatus(Model):
SUCCEEDED = "Succeeded"
SKIPPED = "Skipped"
FAILED = "Failed"
+
def __init__(self): # noqa: E501
"""OperationStatus - a model defined in OpenAPI
diff --git a/applications/workflows/src/workflows_api/models/operation_type.py b/applications/workflows/server/workflows_api/models/operation_type.py
similarity index 100%
rename from applications/workflows/src/workflows_api/models/operation_type.py
rename to applications/workflows/server/workflows_api/models/operation_type.py
diff --git a/applications/workflows/server/workflows_api/service/__init__.py b/applications/workflows/server/workflows_api/service/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/applications/workflows/src/workflows_api/service/workflow_service.py b/applications/workflows/server/workflows_api/service/workflow_service.py
similarity index 100%
rename from applications/workflows/src/workflows_api/service/workflow_service.py
rename to applications/workflows/server/workflows_api/service/workflow_service.py
diff --git a/applications/workflows/src/requirements.txt b/applications/workflows/src/requirements.txt
deleted file mode 100644
index 2639eedf..00000000
--- a/applications/workflows/src/requirements.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-connexion >= 2.6.0; python_version>="3.6"
-connexion >= 2.3.0; python_version=="3.5"
-connexion >= 2.3.0; python_version=="3.4"
-connexion == 2.4.0; python_version<="2.7"
-swagger-ui-bundle >= 0.0.2
-python_dateutil >= 2.6.0
-setuptools >= 21.0.0
diff --git a/applications/workflows/src/workflows_api/controllers/create_and_access_controller.py b/applications/workflows/src/workflows_api/controllers/create_and_access_controller.py
deleted file mode 100644
index c2f1f5d5..00000000
--- a/applications/workflows/src/workflows_api/controllers/create_and_access_controller.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import connexion
-import six
-
-from workflows_api.models.operation import Operation # noqa: E501
-from workflows_api.models.operation_search_result import OperationSearchResult # noqa: E501
-from workflows_api.models.operation_status import OperationStatus # noqa: E501
-from workflows_api.models.search_result_data import SearchResultData # noqa: E501
-from workflows_api import util
-
-from workflows_api.service import workflow_service
-from workflows_api.service.workflow_service import OperationNotFound, OperationException, BadParam
-
-from cloudharness import log
-
-
-def delete_operation(name): # noqa: E501
- """deletes operation by name
-
- delete operation by its name # noqa: E501
-
- :param name:
- :type name: str
-
- :rtype: None
- """
- try:
- workflow_service.delete_operation(name)
- except OperationNotFound as e:
- return (f'{name} not found', 404)
- except OperationException as e:
- log.error(f'Unhandled remote exception while deleting workflow {name}', exc_info=e)
- return f'Unexpected error', e.status
-
-
-def get_operation(name): # noqa: E501
- """get operation by name
-
- retrieves an operation by its name # noqa: E501
-
- :param name:
- :type name: str
-
- :rtype: List[Operation]
- """
- try:
- return workflow_service.get_operation(name)
- except OperationNotFound as e:
- return (f'{name} not found', 404)
- except OperationException as e:
- log.error(f'Unhandled remote exception while retrieving workflow {name}', exc_info=e)
- return f'Unexpected error', e.status
-
-
-def list_operations(status=None, previous_search_token=None, limit=None): # noqa: E501
- """lists operations
-
- see all operations for the user # noqa: E501
-
- :param status: filter by status
- :type status: dict | bytes
- :param previous_search: continue previous search (pagination chunks)
- :type previous_search: dict | bytes
- :param limit: maximum number of records to return per page
- :type limit: int
-
- :rtype: OperationSearchResult
- """
- try:
- return workflow_service.list_operations(status, continue_token=previous_search_token, limit=limit)
- except BadParam as e:
- return (f'Bad parameter: {e.param}, {e}', e.status)
- except OperationException as e:
- log.error(f'Unhandled remote exception while retrieving workflows', exc_info=e)
- return '', e.status
-
-def log_operation(name): # noqa: E501
- """get operation by name
-
- retrieves an operation log by its name # noqa: E501
-
- :param name:
- :type name: str
-
- :rtype: str
- """
- if not name or name == '':
- return ''
-
- return workflow_service.log_operation(name)
-
diff --git a/applications/workflows/src/workflows_api/models/__init__.py b/applications/workflows/src/workflows_api/models/__init__.py
deleted file mode 100644
index ba414fcd..00000000
--- a/applications/workflows/src/workflows_api/models/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# coding: utf-8
-
-# flake8: noqa
-from __future__ import absolute_import
-# import models into model package
-from workflows_api.models.operation import Operation
-from workflows_api.models.operation_search_result import OperationSearchResult
-from workflows_api.models.operation_status import OperationStatus
-from workflows_api.models.search_result_data import SearchResultData
diff --git a/applications/workflows/src/workflows_api/models/operation.py b/applications/workflows/src/workflows_api/models/operation.py
deleted file mode 100644
index 66110437..00000000
--- a/applications/workflows/src/workflows_api/models/operation.py
+++ /dev/null
@@ -1,174 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-from datetime import date, datetime # noqa: F401
-
-from typing import List, Dict # noqa: F401
-
-from workflows_api.models.base_model_ import Model
-from workflows_api import util
-from .operation_status import OperationStatus
-
-class Operation(Model):
- """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
-
- Do not edit the class manually.
- """
-
- def __init__(self, message=None, name=None, create_time=None, status=None, workflow=None): # noqa: E501
- """Operation - a model defined in OpenAPI
-
- :param message: The message of this Operation. # noqa: E501
- :type message: str
- :param name: The name of this Operation. # noqa: E501
- :type name: str
- :param create_time: The create_time of this Operation. # noqa: E501
- :type create_time: datetime
- :param status: The status of this Operation. # noqa: E501
- :type status: OperationStatus
- :param workflow: The workflow of this Operation. # noqa: E501
- :type workflow: str
- """
- self.openapi_types = {
- 'message': str,
- 'name': str,
- 'create_time': datetime,
- 'status': OperationStatus,
- 'workflow': str
- }
-
- self.attribute_map = {
- 'message': 'message',
- 'name': 'name',
- 'create_time': 'createTime',
- 'status': 'status',
- 'workflow': 'workflow'
- }
-
- self._message = message
- self._name = name
- self._create_time = create_time
- self._status = status
- self._workflow = workflow
-
- @classmethod
- def from_dict(cls, dikt) -> 'Operation':
- """Returns the dict as a model
-
- :param dikt: A dict.
- :type: dict
- :return: The Operation of this Operation. # noqa: E501
- :rtype: Operation
- """
- return util.deserialize_model(dikt, cls)
-
- @property
- def message(self):
- """Gets the message of this Operation.
-
- usually set when an error occurred # noqa: E501
-
- :return: The message of this Operation.
- :rtype: str
- """
- return self._message
-
- @message.setter
- def message(self, message):
- """Sets the message of this Operation.
-
- usually set when an error occurred # noqa: E501
-
- :param message: The message of this Operation.
- :type message: str
- """
-
- self._message = message
-
- @property
- def name(self):
- """Gets the name of this Operation.
-
- operation name # noqa: E501
-
- :return: The name of this Operation.
- :rtype: str
- """
- return self._name
-
- @name.setter
- def name(self, name):
- """Sets the name of this Operation.
-
- operation name # noqa: E501
-
- :param name: The name of this Operation.
- :type name: str
- """
-
- self._name = name
-
- @property
- def create_time(self):
- """Gets the create_time of this Operation.
-
-
- :return: The create_time of this Operation.
- :rtype: datetime
- """
- return self._create_time
-
- @create_time.setter
- def create_time(self, create_time):
- """Sets the create_time of this Operation.
-
-
- :param create_time: The create_time of this Operation.
- :type create_time: datetime
- """
-
- self._create_time = create_time
-
- @property
- def status(self):
- """Gets the status of this Operation.
-
-
- :return: The status of this Operation.
- :rtype: OperationStatus
- """
- return self._status
-
- @status.setter
- def status(self, status):
- """Sets the status of this Operation.
-
-
- :param status: The status of this Operation.
- :type status: OperationStatus
- """
-
- self._status = status
-
- @property
- def workflow(self):
- """Gets the workflow of this Operation.
-
- low level representation as an Argo json # noqa: E501
-
- :return: The workflow of this Operation.
- :rtype: str
- """
- return self._workflow
-
- @workflow.setter
- def workflow(self, workflow):
- """Sets the workflow of this Operation.
-
- low level representation as an Argo json # noqa: E501
-
- :param workflow: The workflow of this Operation.
- :type workflow: str
- """
-
- self._workflow = workflow
diff --git a/applications/workflows/src/workflows_api/models/operation_search_result.py b/applications/workflows/src/workflows_api/models/operation_search_result.py
deleted file mode 100644
index 7c5f9770..00000000
--- a/applications/workflows/src/workflows_api/models/operation_search_result.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-from datetime import date, datetime # noqa: F401
-
-from typing import List, Dict # noqa: F401
-
-from workflows_api.models.base_model_ import Model
-from workflows_api import util
-from workflows_api.models.search_result_data import SearchResultData
-from workflows_api.models.operation import Operation
-
-class OperationSearchResult(Model):
- """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
-
- Do not edit the class manually.
- """
-
- def __init__(self, meta=None, items=None): # noqa: E501
- """OperationSearchResult - a model defined in OpenAPI
-
- :param meta: The meta of this OperationSearchResult. # noqa: E501
- :type meta: SearchResultData
- :param items: The items of this OperationSearchResult. # noqa: E501
- :type items: List[Operation]
- """
- self.openapi_types = {
- 'meta': SearchResultData,
- 'items': List[Operation]
- }
-
- self.attribute_map = {
- 'meta': 'meta',
- 'items': 'items'
- }
-
- self._meta = meta
- self._items = items
-
- @classmethod
- def from_dict(cls, dikt) -> 'OperationSearchResult':
- """Returns the dict as a model
-
- :param dikt: A dict.
- :type: dict
- :return: The OperationSearchResult of this OperationSearchResult. # noqa: E501
- :rtype: OperationSearchResult
- """
- return util.deserialize_model(dikt, cls)
-
- @property
- def meta(self):
- """Gets the meta of this OperationSearchResult.
-
-
- :return: The meta of this OperationSearchResult.
- :rtype: SearchResultData
- """
- return self._meta
-
- @meta.setter
- def meta(self, meta):
- """Sets the meta of this OperationSearchResult.
-
-
- :param meta: The meta of this OperationSearchResult.
- :type meta: SearchResultData
- """
-
- self._meta = meta
-
- @property
- def items(self):
- """Gets the items of this OperationSearchResult.
-
-
- :return: The items of this OperationSearchResult.
- :rtype: List[Operation]
- """
- return self._items
-
- @items.setter
- def items(self, items):
- """Sets the items of this OperationSearchResult.
-
-
- :param items: The items of this OperationSearchResult.
- :type items: List[Operation]
- """
-
- self._items = items
diff --git a/applications/workflows/src/workflows_api/models/operation_status.py b/applications/workflows/src/workflows_api/models/operation_status.py
deleted file mode 100644
index 1b2ba6c7..00000000
--- a/applications/workflows/src/workflows_api/models/operation_status.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-from datetime import date, datetime # noqa: F401
-
-from typing import List, Dict # noqa: F401
-
-from workflows_api.models.base_model_ import Model
-from workflows_api import util
-
-
-class OperationStatus(Model):
- """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
-
- Do not edit the class manually.
- """
-
- """
- allowed enum values
- """
- PENDING = "Pending"
- RUNNING = "Running"
- ERROR = "Error"
- SUCCEEDED = "Succeeded"
- SKIPPED = "Skipped"
- FAILED = "Failed"
-
- def __init__(self): # noqa: E501
- """OperationStatus - a model defined in OpenAPI
-
- """
- self.openapi_types = {
- }
-
- self.attribute_map = {
- }
-
- @classmethod
- def from_dict(cls, dikt) -> 'OperationStatus':
- """Returns the dict as a model
-
- :param dikt: A dict.
- :type: dict
- :return: The OperationStatus of this OperationStatus. # noqa: E501
- :rtype: OperationStatus
- """
- return util.deserialize_model(dikt, cls)
diff --git a/applications/workflows/src/workflows_api/models/search_result_data.py b/applications/workflows/src/workflows_api/models/search_result_data.py
deleted file mode 100644
index e50fafec..00000000
--- a/applications/workflows/src/workflows_api/models/search_result_data.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-from datetime import date, datetime # noqa: F401
-
-from typing import List, Dict # noqa: F401
-
-from workflows_api.models.base_model_ import Model
-from workflows_api import util
-
-
-class SearchResultData(Model):
- """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
-
- Do not edit the class manually.
- """
-
- def __init__(self, continue_token=None): # noqa: E501
- """SearchResultData - a model defined in OpenAPI
-
- :param continue_token: The continue_token of this SearchResultData. # noqa: E501
- :type continue_token: str
- """
- self.openapi_types = {
- 'continue_token': str
- }
-
- self.attribute_map = {
- 'continue_token': 'continueToken'
- }
-
- self._continue_token = continue_token
-
- @classmethod
- def from_dict(cls, dikt) -> 'SearchResultData':
- """Returns the dict as a model
-
- :param dikt: A dict.
- :type: dict
- :return: The SearchResultData of this SearchResultData. # noqa: E501
- :rtype: SearchResultData
- """
- return util.deserialize_model(dikt, cls)
-
- @property
- def continue_token(self):
- """Gets the continue_token of this SearchResultData.
-
- token to use for pagination # noqa: E501
-
- :return: The continue_token of this SearchResultData.
- :rtype: str
- """
- return self._continue_token
-
- @continue_token.setter
- def continue_token(self, continue_token):
- """Sets the continue_token of this SearchResultData.
-
- token to use for pagination # noqa: E501
-
- :param continue_token: The continue_token of this SearchResultData.
- :type continue_token: str
- """
-
- self._continue_token = continue_token
diff --git a/applications/workflows/src/workflows_api/openapi/openapi.yaml b/applications/workflows/src/workflows_api/openapi/openapi.yaml
deleted file mode 100644
index 05556983..00000000
--- a/applications/workflows/src/workflows_api/openapi/openapi.yaml
+++ /dev/null
@@ -1,204 +0,0 @@
-openapi: 3.0.0
-info:
- contact:
- email: cloudharness@metacell.us
- description: Workflows API
- license:
- name: UNLICENSED
- title: Workflows API
- version: 0.1.0
-servers:
-- description: Metacell host
- url: https://workflows.cloudharness.metacell.us
-tags:
-- description: standard creation, listing and retrieve
- name: Create and Access
-paths:
- /operations:
- get:
- description: |
- see all operations for the user
- operationId: list_operations
- parameters:
- - description: filter by status
- example: QUEUED
- explode: true
- in: query
- name: status
- required: false
- schema:
- $ref: '#/components/schemas/OperationStatus'
- style: form
- - description: continue previous search (pagination chunks)
- explode: true
- in: query
- name: previous_search_token
- required: false
- schema:
- type: string
- style: form
- - description: maximum number of records to return per page
- explode: true
- in: query
- name: limit
- required: false
- schema:
- default: 10
- maximum: 50
- minimum: 1
- type: integer
- style: form
- responses:
- "200":
- content:
- application/json:
- schema:
- $ref: '#/components/schemas/OperationSearchResult'
- description: search results matching criteria
- "400":
- description: bad input parameter
- summary: lists operations
- tags:
- - Create and Access
- x-openapi-router-controller: workflows_api.controllers.create_and_access_controller
- /operations/{name}:
- delete:
- description: |
- delete operation by its name
- operationId: delete_operation
- parameters:
- - explode: false
- in: path
- name: name
- required: true
- schema:
- type: string
- style: simple
- responses:
- "200":
- description: delete OK
- "404":
- description: not found
- summary: deletes operation by name
- tags:
- - Create and Access
- x-openapi-router-controller: workflows_api.controllers.create_and_access_controller
- get:
- description: |
- retrieves an operation by its name
- operationId: get_operation
- parameters:
- - explode: false
- in: path
- name: name
- required: true
- schema:
- type: string
- style: simple
- responses:
- "200":
- content:
- application/json:
- schema:
- items:
- $ref: '#/components/schemas/Operation'
- type: array
- description: search results matching criteria
- "404":
- description: not found
- summary: get operation by name
- tags:
- - Create and Access
- x-openapi-router-controller: workflows_api.controllers.create_and_access_controller
- /operations/{name}/logs:
- get:
- description: |
- retrieves an operation log by its name
- operationId: log_operation
- parameters:
- - explode: false
- in: path
- name: name
- required: true
- schema:
- type: string
- style: simple
- responses:
- "200":
- content:
- text/plain:
- schema:
- example: Hello world
- type: string
- description: search results matching criteria
- "404":
- description: not found
- summary: get operation by name
- tags:
- - Create and Access
- x-openapi-router-controller: workflows_api.controllers.create_and_access_controller
-components:
- schemas:
- OperationSearchResult:
- description: a list of operations with meta data about the result
- example:
- meta:
- continueToken: continueToken
- items:
- - workflow: workflow
- createTime: 2016-08-29T09:12:33.001Z
- name: name
- message: message
- - workflow: workflow
- createTime: 2016-08-29T09:12:33.001Z
- name: name
- message: message
- properties:
- meta:
- $ref: '#/components/schemas/SearchResultData'
- items:
- items:
- $ref: '#/components/schemas/Operation'
- type: array
- SearchResultData:
- description: describes a search
- example:
- continueToken: continueToken
- properties:
- continueToken:
- description: token to use for pagination
- type: string
- Operation:
- description: represents the status of a distributed API call
- example:
- workflow: workflow
- createTime: 2016-08-29T09:12:33.001Z
- name: name
- message: message
- properties:
- message:
- description: usually set when an error occurred
- type: string
- name:
- description: operation name
- type: string
- createTime:
- example: 2016-08-29T09:12:33.001Z
- format: date-time
- readOnly: true
- type: string
- status:
- $ref: '#/components/schemas/OperationStatus'
- workflow:
- description: low level representation as an Argo json
- type: string
- OperationStatus:
- default: Pending
- enum:
- - Pending
- - Running
- - Error
- - Succeeded
- - Skipped
- - Failed
- type: string
diff --git a/applications/workflows/src/workflows_api/test/test_create_and_access_controller.py b/applications/workflows/src/workflows_api/test/test_create_and_access_controller.py
deleted file mode 100644
index 04b7ca3b..00000000
--- a/applications/workflows/src/workflows_api/test/test_create_and_access_controller.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-import unittest
-
-from flask import json
-from six import BytesIO
-
-from workflows_api.models.operation import Operation # noqa: E501
-from workflows_api.models.operation_search_result import OperationSearchResult # noqa: E501
-from workflows_api.models.operation_status import OperationStatus # noqa: E501
-from workflows_api.test import BaseTestCase
-
-
-class TestCreateAndAccessController(BaseTestCase):
- """CreateAndAccessController integration test stubs"""
-
- def test_delete_operation(self):
- """Test case for delete_operation
-
- deletes operation by name
- """
- headers = {
- }
- response = self.client.open(
- '/operations/{name}'.format(name='name_example'),
- method='DELETE',
- headers=headers)
- self.assert200(response,
- 'Response body is : ' + response.data.decode('utf-8'))
-
- def test_get_operation(self):
- """Test case for get_operation
-
- get operation by name
- """
- headers = {
- 'Accept': 'application/json',
- }
- response = self.client.open(
- '/operations/{name}'.format(name='name_example'),
- method='GET',
- headers=headers)
- self.assert200(response,
- 'Response body is : ' + response.data.decode('utf-8'))
-
- def test_list_operations(self):
- """Test case for list_operations
-
- lists operations
- """
- query_string = [('status', QUEUED),
- ('previous_search_token', 'previous_search_token_example'),
- ('limit', 10)]
- headers = {
- 'Accept': 'application/json',
- }
- response = self.client.open(
- '/operations',
- method='GET',
- headers=headers,
- query_string=query_string)
- self.assert200(response,
- 'Response body is : ' + response.data.decode('utf-8'))
-
- def test_log_operation(self):
- """Test case for log_operation
-
- get operation by name
- """
- headers = {
- 'Accept': 'text/plain',
- }
- response = self.client.open(
- '/operations/{name}/logs'.format(name='name_example'),
- method='GET',
- headers=headers)
- self.assert200(response,
- 'Response body is : ' + response.data.decode('utf-8'))
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/applications/workflows/tasks/extract-download/Dockerfile b/applications/workflows/tasks/extract-download/Dockerfile
index a22a9339..168dd5e7 100644
--- a/applications/workflows/tasks/extract-download/Dockerfile
+++ b/applications/workflows/tasks/extract-download/Dockerfile
@@ -1,8 +1,10 @@
-FROM python:3.7-alpine
+ARG REGISTRY
+ARG TAG=latest
+FROM ${REGISTRY}cloudharness-base:${TAG}
ADD . /
ENV url 'https://www.google.com'
ENV shared_directory /
-CMD python main.py $url $shared_directory
\ No newline at end of file
+CMD env && python main.py $url $shared_directory $folder
\ No newline at end of file
diff --git a/applications/workflows/tasks/extract-download/main.py b/applications/workflows/tasks/extract-download/main.py
index efec501a..13bf858a 100644
--- a/applications/workflows/tasks/extract-download/main.py
+++ b/applications/workflows/tasks/extract-download/main.py
@@ -1,16 +1,45 @@
import urllib.request
import sys
import logging
-import os
+import os, stat
+from pathlib import Path
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
+logging.basicConfig(stream=sys.stdout, level=logging.INFO)
assert len(sys.argv) > 2, 'Arguments not specified. Cannot download'
url = sys.argv[1]
-download_path = sys.argv[2]
+download_path = sys.argv[2].split(':')[-1]
+
+if len(sys.argv) == 4:
+ folder = sys.argv[3]
+else:
+ folder = "."
+
+download_path = os.path.join(download_path, folder)
+Path(download_path).mkdir(parents=True, exist_ok=True)
dest = os.path.join(download_path, url.split('/')[-1])
+
logging.info("Downloading {} to {}".format(url, dest))
urllib.request.urlretrieve(url, dest)
+
+# test to see if the file is an zip archive
+import mimetypes
+mime = mimetypes.MimeTypes().guess_type(dest)[0]
+if mime == "application/zip":
+ # extract it to the download_path folder
+ import zipfile
+ with zipfile.ZipFile(dest, 'r') as zip_ref:
+ zip_ref.extractall(download_path)
+ os.remove(dest)
+
+# TODO: this may be too drastic; perhaps only change the destination, but it is unclear how to handle zip archives that contain a folder
+mode_file = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH # r+w g+o
+mode_dir = mode_file | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH # for dirs also execute
+for dirpath, dirnames, filenames in os.walk(download_path):
+ os.chmod(dirpath, mode_dir)
+ for filename in filenames:
+ os.chmod(os.path.join(dirpath, filename), mode_file)
+ logging.info(f"chmod {dirpath}/{filename} to {mode_file}")
diff --git a/applications/workflows/tasks/notify-queue/Dockerfile b/applications/workflows/tasks/notify-queue/Dockerfile
new file mode 100644
index 00000000..4c73b4a0
--- /dev/null
+++ b/applications/workflows/tasks/notify-queue/Dockerfile
@@ -0,0 +1,11 @@
+ARG REGISTRY
+ARG TAG=latest
+FROM ${REGISTRY}cloudharness-base:${TAG}
+
+ADD . /
+
+ENV workflow_result 'Failure'
+ENV queue_name 'None'
+ENV payload '{}'
+
+CMD python main.py ${workflow_result} ${queue_name} ${payload}
diff --git a/applications/workflows/tasks/notify-queue/main.py b/applications/workflows/tasks/notify-queue/main.py
new file mode 100644
index 00000000..800695cc
--- /dev/null
+++ b/applications/workflows/tasks/notify-queue/main.py
@@ -0,0 +1,12 @@
+import sys
+import logging
+
+logging.basicConfig(stream=sys.stdout, level=logging.INFO)
+
+assert len(sys.argv) > 3, 'Not all arguments specified. Cannot notify queue. Usage: [workflow status] [queue name] [payload]'
+
+from cloudharness.workflows.utils import notify_queue
+
+queue = sys.argv[2]
+message = {'status': sys.argv[1], 'payload': sys.argv[3]}
+notify_queue(queue, message)
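
The task reduces to the single `notify_queue` call above; a minimal sketch of issuing the same call directly, assuming the `cloudharness` library is installed and its queue backend is reachable (queue name and payload are placeholders):

```python
from cloudharness.workflows.utils import notify_queue

# Mirrors what main.py does with its three CLI arguments
notify_queue("my-queue", {"status": "Succeeded", "payload": "{}"})
```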
diff --git a/applications/workflows/tasks/send-result-event/Dockerfile b/applications/workflows/tasks/send-result-event/Dockerfile
index 19bb7d61..680153b7 100644
--- a/applications/workflows/tasks/send-result-event/Dockerfile
+++ b/applications/workflows/tasks/send-result-event/Dockerfile
@@ -1,4 +1,4 @@
-ARG REGISTRY=r.cfcr.io/tarelli/
+ARG REGISTRY
ARG TAG=latest
FROM ${REGISTRY}cloudharness-base:${TAG}
diff --git a/blueprint/deployment-configuration/codefresh-template.yaml b/blueprint/deployment-configuration/codefresh-template.yaml
index c2cc568a..7ee58e06 100644
--- a/blueprint/deployment-configuration/codefresh-template.yaml
+++ b/blueprint/deployment-configuration/codefresh-template.yaml
@@ -14,7 +14,7 @@ steps:
post_main_clone:
title: Post main clone
type: parallel
- stage: build
+ stage: prepare
steps:
- title: Cloning cloud-harness repository...
type: git-clone
diff --git a/client/cloudharness_cli/README.md b/client/cloudharness_cli/README.md
index d3290b84..874a2ed2 100644
--- a/client/cloudharness_cli/README.md
+++ b/client/cloudharness_cli/README.md
@@ -97,6 +97,99 @@ Class | Method | HTTP request | Description
cloudharness@metacell.us
+# cloudharness-cli.common
+Cloud Harness Platform - Reference CH service API
+
+This Python package is automatically generated by the [OpenAPI Generator](https://openapi-generator.tech) project:
+
+- API version: 0.1.0
+- Package version: 1.0.0
+- Build package: org.openapitools.codegen.languages.PythonClientCodegen
+
+## Requirements.
+
+Python 2.7 and 3.4+
+
+## Installation & Usage
+### pip install
+
+If the python package is hosted on a repository, you can install directly using:
+
+```sh
+pip install git+https://github.com/GIT_USER_ID/GIT_REPO_ID.git
+```
+(you may need to run `pip` with root permission: `sudo pip install git+https://github.com/GIT_USER_ID/GIT_REPO_ID.git`)
+
+Then import the package:
+```python
+import cloudharness_cli.common
+```
+
+### Setuptools
+
+Install via [Setuptools](http://pypi.python.org/pypi/setuptools).
+
+```sh
+python setup.py install --user
+```
+(or `sudo python setup.py install` to install the package for all users)
+
+Then import the package:
+```python
+import cloudharness_cli.common
+```
+
+## Getting Started
+
+Please follow the [installation procedure](#installation--usage) and then run the following:
+
+```python
+from __future__ import print_function
+import time
+import cloudharness_cli.common
+from cloudharness_cli.common.rest import ApiException
+from pprint import pprint
+
+
+# Defining the host is optional and defaults to http://localhost/api
+configuration = cloudharness_cli.common.Configuration()
+configuration.host = "http://localhost/api"
+# Enter a context with an instance of the API client
+with cloudharness_cli.common.ApiClient(configuration) as api_client:
+ # Create an instance of the API class
+ api_instance = cloudharness_cli.common.SentryApi(api_client)
+ appname = 'appname_example' # str |
+
+ try:
+ # Gets the Sentry DSN for a given application
+ api_response = api_instance.getdsn(appname)
+ pprint(api_response)
+ except ApiException as e:
+ print("Exception when calling SentryApi->getdsn: %s\n" % e)
+
+```
+
+## Documentation for API Endpoints
+
+All URIs are relative to *http://localhost/api*
+
+Class | Method | HTTP request | Description
+------------ | ------------- | ------------- | -------------
+*SentryApi* | [**getdsn**](docs/common/SentryApi.md#getdsn) | **GET** /sentry/getdsn/{appname} | Gets the Sentry DSN for a given application
+
+
+## Documentation For Models
+
+
+
+## Documentation For Authorization
+
+ No authorization is required for any endpoint.
+
+## Author
+
+
+
+
# cloudharness-cli.samples
CloudHarness Sample api
@@ -180,6 +273,7 @@ All URIs are relative to *https://samples.cloudharness.metacell.us/api*
Class | Method | HTTP request | Description
------------ | ------------- | ------------- | -------------
*AuthApi* | [**valid_token**](docs/samples/AuthApi.md#valid_token) | **GET** /valid | Check if the token is valid. Get a token by logging into the base url
+*WorkflowsApi* | [**error**](docs/samples/WorkflowsApi.md#error) | **GET** /error | test sentry is working
*WorkflowsApi* | [**submit_async**](docs/samples/WorkflowsApi.md#submit_async) | **GET** /operation_async | Send an asynchronous operation
*WorkflowsApi* | [**submit_sync**](docs/samples/WorkflowsApi.md#submit_sync) | **GET** /operation_sync | Send a synchronous operation
*WorkflowsApi* | [**submit_sync_with_results**](docs/samples/WorkflowsApi.md#submit_sync_with_results) | **GET** /operation_sync_results | Send a synchronous operation and get results using the event queue. Just a sum, but in the cloud
diff --git a/client/cloudharness_cli/cloudharness_cli.egg-info/SOURCES.txt b/client/cloudharness_cli/cloudharness_cli.egg-info/SOURCES.txt
index 8f65176a..ce02554f 100644
--- a/client/cloudharness_cli/cloudharness_cli.egg-info/SOURCES.txt
+++ b/client/cloudharness_cli/cloudharness_cli.egg-info/SOURCES.txt
@@ -6,6 +6,14 @@ cloudharness_cli.egg-info/SOURCES.txt
cloudharness_cli.egg-info/dependency_links.txt
cloudharness_cli.egg-info/requires.txt
cloudharness_cli.egg-info/top_level.txt
+cloudharness_cli/common/__init__.py
+cloudharness_cli/common/api_client.py
+cloudharness_cli/common/configuration.py
+cloudharness_cli/common/exceptions.py
+cloudharness_cli/common/rest.py
+cloudharness_cli/common/api/__init__.py
+cloudharness_cli/common/api/sentry_api.py
+cloudharness_cli/common/models/__init__.py
cloudharness_cli/samples/__init__.py
cloudharness_cli/samples/api_client.py
cloudharness_cli/samples/configuration.py
diff --git a/client/cloudharness_cli/cloudharness_cli/common/__init__.py b/client/cloudharness_cli/cloudharness_cli/common/__init__.py
new file mode 100644
index 00000000..ca7fc214
--- /dev/null
+++ b/client/cloudharness_cli/cloudharness_cli/common/__init__.py
@@ -0,0 +1,31 @@
+# coding: utf-8
+
+# flake8: noqa
+
+"""
+ CH common service API
+
+ Cloud Harness Platform - Reference CH service API # noqa: E501
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+__version__ = "1.0.0"
+
+# import apis into sdk package
+from cloudharness_cli.common.api.sentry_api import SentryApi
+
+# import ApiClient
+from cloudharness_cli.common.api_client import ApiClient
+from cloudharness_cli.common.configuration import Configuration
+from cloudharness_cli.common.exceptions import OpenApiException
+from cloudharness_cli.common.exceptions import ApiTypeError
+from cloudharness_cli.common.exceptions import ApiValueError
+from cloudharness_cli.common.exceptions import ApiKeyError
+from cloudharness_cli.common.exceptions import ApiException
+# import models into sdk package
+
diff --git a/client/cloudharness_cli/cloudharness_cli/common/api/__init__.py b/client/cloudharness_cli/cloudharness_cli/common/api/__init__.py
new file mode 100644
index 00000000..189f1a1e
--- /dev/null
+++ b/client/cloudharness_cli/cloudharness_cli/common/api/__init__.py
@@ -0,0 +1,6 @@
+from __future__ import absolute_import
+
+# flake8: noqa
+
+# import apis into api package
+from cloudharness_cli.common.api.sentry_api import SentryApi
diff --git a/client/cloudharness_cli/cloudharness_cli/common/api/sentry_api.py b/client/cloudharness_cli/cloudharness_cli/common/api/sentry_api.py
new file mode 100644
index 00000000..e71f13a5
--- /dev/null
+++ b/client/cloudharness_cli/cloudharness_cli/common/api/sentry_api.py
@@ -0,0 +1,151 @@
+# coding: utf-8
+
+"""
+ CH common service API
+
+ Cloud Harness Platform - Reference CH service API # noqa: E501
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from cloudharness_cli.common.api_client import ApiClient
+from cloudharness_cli.common.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class SentryApi(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def getdsn(self, appname, **kwargs): # noqa: E501
+ """Gets the Sentry DSN for a given application # noqa: E501
+
+ Gets the Sentry DSN for a given application # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.getdsn(appname, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str appname: (required)
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.getdsn_with_http_info(appname, **kwargs) # noqa: E501
+
+ def getdsn_with_http_info(self, appname, **kwargs): # noqa: E501
+ """Gets the Sentry DSN for a given application # noqa: E501
+
+ Gets the Sentry DSN for a given application # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.getdsn_with_http_info(appname, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str appname: (required)
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'appname'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method getdsn" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'appname' is set
+ if self.api_client.client_side_validation and ('appname' not in local_var_params or # noqa: E501
+ local_var_params['appname'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `appname` when calling `getdsn`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'appname' in local_var_params:
+ path_params['appname'] = local_var_params['appname'] # noqa: E501
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = [] # noqa: E501
+
+ return self.api_client.call_api(
+ '/sentry/getdsn/{appname}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='str', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/client/cloudharness_cli/cloudharness_cli/common/api_client.py b/client/cloudharness_cli/cloudharness_cli/common/api_client.py
new file mode 100644
index 00000000..7f590406
--- /dev/null
+++ b/client/cloudharness_cli/cloudharness_cli/common/api_client.py
@@ -0,0 +1,647 @@
+# coding: utf-8
+"""
+ CH common service API
+
+ Cloud Harness Platform - Reference CH service API # noqa: E501
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by: https://openapi-generator.tech
+"""
+
+from __future__ import absolute_import
+
+import atexit
+import datetime
+from dateutil.parser import parse
+import json
+import mimetypes
+from multiprocessing.pool import ThreadPool
+import os
+import re
+import tempfile
+
+# python 2 and python 3 compatibility library
+import six
+from six.moves.urllib.parse import quote
+
+from cloudharness_cli.common.configuration import Configuration
+import cloudharness_cli.common.models
+from cloudharness_cli.common import rest
+from cloudharness_cli.common.exceptions import ApiValueError
+
+
+class ApiClient(object):
+ """Generic API client for OpenAPI client library builds.
+
+ OpenAPI generic API client. This client handles the client-
+ server communication, and is invariant across implementations. Specifics of
+ the methods and models for each application are generated from the OpenAPI
+ templates.
+
+ NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+ Do not edit the class manually.
+
+ :param configuration: .Configuration object for this client
+ :param header_name: a header to pass when making calls to the API.
+ :param header_value: a header value to pass when making calls to
+ the API.
+ :param cookie: a cookie to include in the header when making calls
+ to the API
+ :param pool_threads: The number of threads to use for async requests
+ to the API. More threads means more concurrent API requests.
+ """
+
+ PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
+ NATIVE_TYPES_MAPPING = {
+ 'int': int,
+ 'long': int if six.PY3 else long, # noqa: F821
+ 'float': float,
+ 'str': str,
+ 'bool': bool,
+ 'date': datetime.date,
+ 'datetime': datetime.datetime,
+ 'object': object,
+ }
+ _pool = None
+
+ def __init__(self, configuration=None, header_name=None, header_value=None,
+ cookie=None, pool_threads=1):
+ if configuration is None:
+ configuration = Configuration.get_default_copy()
+ self.configuration = configuration
+ self.pool_threads = pool_threads
+
+ self.rest_client = rest.RESTClientObject(configuration)
+ self.default_headers = {}
+ if header_name is not None:
+ self.default_headers[header_name] = header_value
+ self.cookie = cookie
+ # Set default User-Agent.
+ self.user_agent = 'OpenAPI-Generator/1.0.0/python'
+ self.client_side_validation = configuration.client_side_validation
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+
+ def close(self):
+ if self._pool:
+ self._pool.close()
+ self._pool.join()
+ self._pool = None
+ if hasattr(atexit, 'unregister'):
+ atexit.unregister(self.close)
+
+ @property
+ def pool(self):
+ """Create thread pool on first request
+ avoids instantiating unused threadpool for blocking clients.
+ """
+ if self._pool is None:
+ atexit.register(self.close)
+ self._pool = ThreadPool(self.pool_threads)
+ return self._pool
+
+ @property
+ def user_agent(self):
+ """User agent for this API client"""
+ return self.default_headers['User-Agent']
+
+ @user_agent.setter
+ def user_agent(self, value):
+ self.default_headers['User-Agent'] = value
+
+ def set_default_header(self, header_name, header_value):
+ self.default_headers[header_name] = header_value
+
+ def __call_api(
+ self, resource_path, method, path_params=None,
+ query_params=None, header_params=None, body=None, post_params=None,
+ files=None, response_type=None, auth_settings=None,
+ _return_http_data_only=None, collection_formats=None,
+ _preload_content=True, _request_timeout=None, _host=None):
+
+ config = self.configuration
+
+ # header parameters
+ header_params = header_params or {}
+ header_params.update(self.default_headers)
+ if self.cookie:
+ header_params['Cookie'] = self.cookie
+ if header_params:
+ header_params = self.sanitize_for_serialization(header_params)
+ header_params = dict(self.parameters_to_tuples(header_params,
+ collection_formats))
+
+ # path parameters
+ if path_params:
+ path_params = self.sanitize_for_serialization(path_params)
+ path_params = self.parameters_to_tuples(path_params,
+ collection_formats)
+ for k, v in path_params:
+ # specified safe chars, encode everything
+ resource_path = resource_path.replace(
+ '{%s}' % k,
+ quote(str(v), safe=config.safe_chars_for_path_param)
+ )
+
+ # query parameters
+ if query_params:
+ query_params = self.sanitize_for_serialization(query_params)
+ query_params = self.parameters_to_tuples(query_params,
+ collection_formats)
+
+ # post parameters
+ if post_params or files:
+ post_params = post_params if post_params else []
+ post_params = self.sanitize_for_serialization(post_params)
+ post_params = self.parameters_to_tuples(post_params,
+ collection_formats)
+ post_params.extend(self.files_parameters(files))
+
+ # auth setting
+ self.update_params_for_auth(header_params, query_params, auth_settings)
+
+ # body
+ if body:
+ body = self.sanitize_for_serialization(body)
+
+ # request url
+ if _host is None:
+ url = self.configuration.host + resource_path
+ else:
+ # use server/host defined in path or operation instead
+ url = _host + resource_path
+
+ # perform request and return response
+ response_data = self.request(
+ method, url, query_params=query_params, headers=header_params,
+ post_params=post_params, body=body,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout)
+
+ self.last_response = response_data
+
+ return_data = response_data
+ if _preload_content:
+ # deserialize response data
+ if response_type:
+ return_data = self.deserialize(response_data, response_type)
+ else:
+ return_data = None
+
+ if _return_http_data_only:
+ return (return_data)
+ else:
+ return (return_data, response_data.status,
+ response_data.getheaders())
+
+ def sanitize_for_serialization(self, obj):
+ """Builds a JSON POST object.
+
+ If obj is None, return None.
+ If obj is str, int, long, float, bool, return directly.
+ If obj is datetime.datetime, datetime.date
+ convert to string in iso8601 format.
+ If obj is list, sanitize each element in the list.
+ If obj is dict, return the dict.
+ If obj is OpenAPI model, return the properties dict.
+
+ :param obj: The data to serialize.
+ :return: The serialized form of data.
+ """
+ if obj is None:
+ return None
+ elif isinstance(obj, self.PRIMITIVE_TYPES):
+ return obj
+ elif isinstance(obj, list):
+ return [self.sanitize_for_serialization(sub_obj)
+ for sub_obj in obj]
+ elif isinstance(obj, tuple):
+ return tuple(self.sanitize_for_serialization(sub_obj)
+ for sub_obj in obj)
+ elif isinstance(obj, (datetime.datetime, datetime.date)):
+ return obj.isoformat()
+
+ if isinstance(obj, dict):
+ obj_dict = obj
+ else:
+ # Convert model obj to dict except
+ # attributes `openapi_types`, `attribute_map`
+ # and attributes which value is not None.
+ # Convert attribute name to json key in
+ # model definition for request.
+ obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
+ for attr, _ in six.iteritems(obj.openapi_types)
+ if getattr(obj, attr) is not None}
+
+ return {key: self.sanitize_for_serialization(val)
+ for key, val in six.iteritems(obj_dict)}
+
+ def deserialize(self, response, response_type):
+ """Deserializes response into an object.
+
+ :param response: RESTResponse object to be deserialized.
+ :param response_type: class literal for
+ deserialized object, or string of class name.
+
+ :return: deserialized object.
+ """
+ # handle file downloading
+ # save response body into a tmp file and return the instance
+ if response_type == "file":
+ return self.__deserialize_file(response)
+
+ # fetch data from response object
+ try:
+ data = json.loads(response.data)
+ except ValueError:
+ data = response.data
+
+ return self.__deserialize(data, response_type)
+
+ def __deserialize(self, data, klass):
+ """Deserializes dict, list, str into an object.
+
+ :param data: dict, list or str.
+ :param klass: class literal, or string of class name.
+
+ :return: object.
+ """
+ if data is None:
+ return None
+
+ if type(klass) == str:
+ if klass.startswith('list['):
+ sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
+ return [self.__deserialize(sub_data, sub_kls)
+ for sub_data in data]
+
+ if klass.startswith('dict('):
+ sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
+ return {k: self.__deserialize(v, sub_kls)
+ for k, v in six.iteritems(data)}
+
+ # convert str to class
+ if klass in self.NATIVE_TYPES_MAPPING:
+ klass = self.NATIVE_TYPES_MAPPING[klass]
+ else:
+ klass = getattr(cloudharness_cli.common.models, klass)
+
+ if klass in self.PRIMITIVE_TYPES:
+ return self.__deserialize_primitive(data, klass)
+ elif klass == object:
+ return self.__deserialize_object(data)
+ elif klass == datetime.date:
+ return self.__deserialize_date(data)
+ elif klass == datetime.datetime:
+ return self.__deserialize_datetime(data)
+ else:
+ return self.__deserialize_model(data, klass)
+
+ def call_api(self, resource_path, method,
+ path_params=None, query_params=None, header_params=None,
+ body=None, post_params=None, files=None,
+ response_type=None, auth_settings=None, async_req=None,
+ _return_http_data_only=None, collection_formats=None,
+ _preload_content=True, _request_timeout=None, _host=None):
+ """Makes the HTTP request (synchronous) and returns deserialized data.
+
+        To make an asynchronous request, set the async_req parameter.
+
+ :param resource_path: Path to method endpoint.
+ :param method: Method to call.
+ :param path_params: Path parameters in the url.
+ :param query_params: Query parameters in the url.
+ :param header_params: Header parameters to be
+ placed in the request header.
+ :param body: Request body.
+ :param post_params dict: Request post form parameters,
+ for `application/x-www-form-urlencoded`, `multipart/form-data`.
+ :param auth_settings list: Auth Settings names for the request.
+        :param response_type: Response data type.
+ :param files dict: key -> filename, value -> filepath,
+ for `multipart/form-data`.
+ :param async_req bool: execute request asynchronously
+        :param _return_http_data_only: response data without the HTTP status
+                                       code and headers
+ :param collection_formats: dict of collection formats for path, query,
+ header, and post parameters.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return:
+ If async_req parameter is True,
+ the request will be called asynchronously.
+ The method will return the request thread.
+ If parameter async_req is False or missing,
+ then the method will return the response directly.
+ """
+ if not async_req:
+ return self.__call_api(resource_path, method,
+ path_params, query_params, header_params,
+ body, post_params, files,
+ response_type, auth_settings,
+ _return_http_data_only, collection_formats,
+ _preload_content, _request_timeout, _host)
+
+ return self.pool.apply_async(self.__call_api, (resource_path,
+ method, path_params,
+ query_params,
+ header_params, body,
+ post_params, files,
+ response_type,
+ auth_settings,
+ _return_http_data_only,
+ collection_formats,
+ _preload_content,
+ _request_timeout,
+ _host))
+
+ def request(self, method, url, query_params=None, headers=None,
+ post_params=None, body=None, _preload_content=True,
+ _request_timeout=None):
+ """Makes the HTTP request using RESTClient."""
+ if method == "GET":
+ return self.rest_client.GET(url,
+ query_params=query_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ headers=headers)
+ elif method == "HEAD":
+ return self.rest_client.HEAD(url,
+ query_params=query_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ headers=headers)
+ elif method == "OPTIONS":
+ return self.rest_client.OPTIONS(url,
+ query_params=query_params,
+ headers=headers,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout)
+ elif method == "POST":
+ return self.rest_client.POST(url,
+ query_params=query_params,
+ headers=headers,
+ post_params=post_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+ elif method == "PUT":
+ return self.rest_client.PUT(url,
+ query_params=query_params,
+ headers=headers,
+ post_params=post_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+ elif method == "PATCH":
+ return self.rest_client.PATCH(url,
+ query_params=query_params,
+ headers=headers,
+ post_params=post_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+ elif method == "DELETE":
+ return self.rest_client.DELETE(url,
+ query_params=query_params,
+ headers=headers,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+ else:
+ raise ApiValueError(
+ "http method must be `GET`, `HEAD`, `OPTIONS`,"
+ " `POST`, `PATCH`, `PUT` or `DELETE`."
+ )
+
+ def parameters_to_tuples(self, params, collection_formats):
+ """Get parameters as list of tuples, formatting collections.
+
+ :param params: Parameters as dict or list of two-tuples
+ :param dict collection_formats: Parameter collection formats
+ :return: Parameters as list of tuples, collections formatted
+ """
+ new_params = []
+ if collection_formats is None:
+ collection_formats = {}
+ for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501
+ if k in collection_formats:
+ collection_format = collection_formats[k]
+ if collection_format == 'multi':
+ new_params.extend((k, value) for value in v)
+ else:
+ if collection_format == 'ssv':
+ delimiter = ' '
+ elif collection_format == 'tsv':
+ delimiter = '\t'
+ elif collection_format == 'pipes':
+ delimiter = '|'
+ else: # csv is the default
+ delimiter = ','
+ new_params.append(
+ (k, delimiter.join(str(value) for value in v)))
+ else:
+ new_params.append((k, v))
+ return new_params
+
+ def files_parameters(self, files=None):
+ """Builds form parameters.
+
+ :param files: File parameters.
+ :return: Form parameters with files.
+ """
+ params = []
+
+ if files:
+ for k, v in six.iteritems(files):
+ if not v:
+ continue
+ file_names = v if type(v) is list else [v]
+ for n in file_names:
+ with open(n, 'rb') as f:
+ filename = os.path.basename(f.name)
+ filedata = f.read()
+ mimetype = (mimetypes.guess_type(filename)[0] or
+ 'application/octet-stream')
+ params.append(
+ tuple([k, tuple([filename, filedata, mimetype])]))
+
+ return params
+
+ def select_header_accept(self, accepts):
+ """Returns `Accept` based on an array of accepts provided.
+
+ :param accepts: List of headers.
+ :return: Accept (e.g. application/json).
+ """
+ if not accepts:
+ return
+
+ accepts = [x.lower() for x in accepts]
+
+ if 'application/json' in accepts:
+ return 'application/json'
+ else:
+ return ', '.join(accepts)
+
+ def select_header_content_type(self, content_types):
+ """Returns `Content-Type` based on an array of content_types provided.
+
+ :param content_types: List of content-types.
+ :return: Content-Type (e.g. application/json).
+ """
+ if not content_types:
+ return 'application/json'
+
+ content_types = [x.lower() for x in content_types]
+
+ if 'application/json' in content_types or '*/*' in content_types:
+ return 'application/json'
+ else:
+ return content_types[0]
+
+ def update_params_for_auth(self, headers, querys, auth_settings):
+ """Updates header and query params based on authentication setting.
+
+ :param headers: Header parameters dict to be updated.
+ :param querys: Query parameters tuple list to be updated.
+ :param auth_settings: Authentication setting identifiers list.
+ """
+ if not auth_settings:
+ return
+
+ for auth in auth_settings:
+ auth_setting = self.configuration.auth_settings().get(auth)
+ if auth_setting:
+ if auth_setting['in'] == 'cookie':
+ headers['Cookie'] = auth_setting['value']
+ elif auth_setting['in'] == 'header':
+ headers[auth_setting['key']] = auth_setting['value']
+ elif auth_setting['in'] == 'query':
+ querys.append((auth_setting['key'], auth_setting['value']))
+ else:
+ raise ApiValueError(
+ 'Authentication token must be in `query` or `header`'
+ )
+
+ def __deserialize_file(self, response):
+ """Deserializes body to file
+
+ Saves response body into a file in a temporary folder,
+ using the filename from the `Content-Disposition` header if provided.
+
+ :param response: RESTResponse.
+ :return: file path.
+ """
+ fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
+ os.close(fd)
+ os.remove(path)
+
+ content_disposition = response.getheader("Content-Disposition")
+ if content_disposition:
+ filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
+ content_disposition).group(1)
+ path = os.path.join(os.path.dirname(path), filename)
+
+ with open(path, "wb") as f:
+ f.write(response.data)
+
+ return path
+
+ def __deserialize_primitive(self, data, klass):
+ """Deserializes string to primitive type.
+
+ :param data: str.
+ :param klass: class literal.
+
+ :return: int, long, float, str, bool.
+ """
+ try:
+ return klass(data)
+ except UnicodeEncodeError:
+ return six.text_type(data)
+ except TypeError:
+ return data
+
+ def __deserialize_object(self, value):
+ """Return an original value.
+
+ :return: object.
+ """
+ return value
+
+ def __deserialize_date(self, string):
+ """Deserializes string to date.
+
+ :param string: str.
+ :return: date.
+ """
+ try:
+ return parse(string).date()
+ except ImportError:
+ return string
+ except ValueError:
+ raise rest.ApiException(
+ status=0,
+ reason="Failed to parse `{0}` as date object".format(string)
+ )
+
+ def __deserialize_datetime(self, string):
+ """Deserializes string to datetime.
+
+ The string should be in iso8601 datetime format.
+
+ :param string: str.
+ :return: datetime.
+ """
+ try:
+ return parse(string)
+ except ImportError:
+ return string
+ except ValueError:
+ raise rest.ApiException(
+ status=0,
+ reason=(
+ "Failed to parse `{0}` as datetime object"
+ .format(string)
+ )
+ )
+
+ def __deserialize_model(self, data, klass):
+ """Deserializes list or dict to model.
+
+ :param data: dict, list.
+ :param klass: class literal.
+ :return: model object.
+ """
+
+ if not klass.openapi_types and not hasattr(klass,
+ 'get_real_child_model'):
+ return data
+
+ kwargs = {}
+ if (data is not None and
+ klass.openapi_types is not None and
+ isinstance(data, (list, dict))):
+ for attr, attr_type in six.iteritems(klass.openapi_types):
+ if klass.attribute_map[attr] in data:
+ value = data[klass.attribute_map[attr]]
+ kwargs[attr] = self.__deserialize(value, attr_type)
+
+ instance = klass(**kwargs)
+
+ if hasattr(instance, 'get_real_child_model'):
+ klass_name = instance.get_real_child_model(data)
+ if klass_name:
+ instance = self.__deserialize(data, klass_name)
+ return instance
diff --git a/client/cloudharness_cli/cloudharness_cli/common/configuration.py b/client/cloudharness_cli/cloudharness_cli/common/configuration.py
new file mode 100644
index 00000000..d287a37a
--- /dev/null
+++ b/client/cloudharness_cli/cloudharness_cli/common/configuration.py
@@ -0,0 +1,373 @@
+# coding: utf-8
+
+"""
+ CH common service API
+
+ Cloud Harness Platform - Reference CH service API # noqa: E501
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import copy
+import logging
+import multiprocessing
+import sys
+import urllib3
+
+import six
+from six.moves import http_client as httplib
+
+
+class Configuration(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+
+ Ref: https://openapi-generator.tech
+ Do not edit the class manually.
+
+ :param host: Base url
+ :param api_key: Dict to store API key(s).
+ Each entry in the dict specifies an API key.
+ The dict key is the name of the security scheme in the OAS specification.
+ The dict value is the API key secret.
+ :param api_key_prefix: Dict to store API prefix (e.g. Bearer)
+ The dict key is the name of the security scheme in the OAS specification.
+ The dict value is an API key prefix when generating the auth data.
+ :param username: Username for HTTP basic authentication
+ :param password: Password for HTTP basic authentication
+ :param discard_unknown_keys: Boolean value indicating whether to discard
+ unknown properties. A server may send a response that includes additional
+ properties that are not known by the client in the following scenarios:
+ 1. The OpenAPI document is incomplete, i.e. it does not match the server
+ implementation.
+ 2. The client was generated using an older version of the OpenAPI document
+ and the server has been upgraded since then.
+ If a schema in the OpenAPI document defines the additionalProperties attribute,
+ then all undeclared properties received by the server are injected into the
+ additional properties map. In that case, there are undeclared properties, and
+ nothing to discard.
+
+ """
+
+ _default = None
+
+ def __init__(self, host="http://localhost/api",
+ api_key=None, api_key_prefix=None,
+ username=None, password=None,
+ discard_unknown_keys=False,
+ ):
+ """Constructor
+ """
+ self.host = host
+ """Default Base url
+ """
+ self.temp_folder_path = None
+ """Temp file folder for downloading files
+ """
+ # Authentication Settings
+ self.api_key = {}
+ if api_key:
+ self.api_key = api_key
+ """dict to store API key(s)
+ """
+ self.api_key_prefix = {}
+ if api_key_prefix:
+ self.api_key_prefix = api_key_prefix
+ """dict to store API prefix (e.g. Bearer)
+ """
+ self.refresh_api_key_hook = None
+ """function hook to refresh API key if expired
+ """
+ self.username = username
+ """Username for HTTP basic authentication
+ """
+ self.password = password
+ """Password for HTTP basic authentication
+ """
+ self.discard_unknown_keys = discard_unknown_keys
+ self.logger = {}
+ """Logging Settings
+ """
+ self.logger["package_logger"] = logging.getLogger("cloudharness_cli.common")
+ self.logger["urllib3_logger"] = logging.getLogger("urllib3")
+ self.logger_format = '%(asctime)s %(levelname)s %(message)s'
+ """Log format
+ """
+ self.logger_stream_handler = None
+ """Log stream handler
+ """
+ self.logger_file_handler = None
+ """Log file handler
+ """
+ self.logger_file = None
+ """Debug file location
+ """
+ self.debug = False
+ """Debug switch
+ """
+
+ self.verify_ssl = True
+ """SSL/TLS verification
+ Set this to false to skip verifying SSL certificate when calling API
+ from https server.
+ """
+ self.ssl_ca_cert = None
+ """Set this to customize the certificate file to verify the peer.
+ """
+ self.cert_file = None
+ """client certificate file
+ """
+ self.key_file = None
+ """client key file
+ """
+ self.assert_hostname = None
+ """Set this to True/False to enable/disable SSL hostname verification.
+ """
+
+ self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
+ """urllib3 connection pool's maximum number of connections saved
+ per pool. urllib3 uses 1 connection as default value, but this is
+ not the best value when you are making a lot of possibly parallel
+ requests to the same host, which is often the case here.
+ cpu_count * 5 is used as default value to increase performance.
+ """
+
+ self.proxy = None
+ """Proxy URL
+ """
+ self.proxy_headers = None
+ """Proxy headers
+ """
+ self.safe_chars_for_path_param = ''
+ """Safe chars for path_param
+ """
+ self.retries = None
+ """Adding retries to override urllib3 default value 3
+ """
+ # Disable client side validation
+ self.client_side_validation = True
+
+ def __deepcopy__(self, memo):
+ cls = self.__class__
+ result = cls.__new__(cls)
+ memo[id(self)] = result
+ for k, v in self.__dict__.items():
+ if k not in ('logger', 'logger_file_handler'):
+ setattr(result, k, copy.deepcopy(v, memo))
+ # shallow copy of loggers
+ result.logger = copy.copy(self.logger)
+ # use setters to configure loggers
+ result.logger_file = self.logger_file
+ result.debug = self.debug
+ return result
+
+ @classmethod
+ def set_default(cls, default):
+ """Set default instance of configuration.
+
+ It stores default configuration, which can be
+ returned by get_default_copy method.
+
+ :param default: object of Configuration
+ """
+ cls._default = copy.deepcopy(default)
+
+ @classmethod
+ def get_default_copy(cls):
+ """Return new instance of configuration.
+
+        This method returns a newly created Configuration object (built with
+        the default constructor), or a copy of the default configuration
+        passed to the set_default method.
+
+ :return: The configuration object.
+ """
+ if cls._default is not None:
+ return copy.deepcopy(cls._default)
+ return Configuration()
+
+ @property
+ def logger_file(self):
+ """The logger file.
+
+ If the logger_file is None, then add stream handler and remove file
+ handler. Otherwise, add file handler and remove stream handler.
+
+ :param value: The logger_file path.
+ :type: str
+ """
+ return self.__logger_file
+
+ @logger_file.setter
+ def logger_file(self, value):
+ """The logger file.
+
+ If the logger_file is None, then add stream handler and remove file
+ handler. Otherwise, add file handler and remove stream handler.
+
+ :param value: The logger_file path.
+ :type: str
+ """
+ self.__logger_file = value
+ if self.__logger_file:
+            # If a logging file is set,
+            # then add the file handler and remove the stream handler.
+ self.logger_file_handler = logging.FileHandler(self.__logger_file)
+ self.logger_file_handler.setFormatter(self.logger_formatter)
+ for _, logger in six.iteritems(self.logger):
+ logger.addHandler(self.logger_file_handler)
+
+ @property
+ def debug(self):
+ """Debug status
+
+ :param value: The debug status, True or False.
+ :type: bool
+ """
+ return self.__debug
+
+ @debug.setter
+ def debug(self, value):
+ """Debug status
+
+ :param value: The debug status, True or False.
+ :type: bool
+ """
+ self.__debug = value
+ if self.__debug:
+ # if debug status is True, turn on debug logging
+ for _, logger in six.iteritems(self.logger):
+ logger.setLevel(logging.DEBUG)
+ # turn on httplib debug
+ httplib.HTTPConnection.debuglevel = 1
+ else:
+ # if debug status is False, turn off debug logging,
+ # setting log level to default `logging.WARNING`
+ for _, logger in six.iteritems(self.logger):
+ logger.setLevel(logging.WARNING)
+ # turn off httplib debug
+ httplib.HTTPConnection.debuglevel = 0
+
+ @property
+ def logger_format(self):
+ """The logger format.
+
+        The logger_formatter will be updated when logger_format is set.
+
+ :param value: The format string.
+ :type: str
+ """
+ return self.__logger_format
+
+ @logger_format.setter
+ def logger_format(self, value):
+ """The logger format.
+
+        The logger_formatter will be updated when logger_format is set.
+
+ :param value: The format string.
+ :type: str
+ """
+ self.__logger_format = value
+ self.logger_formatter = logging.Formatter(self.__logger_format)
+
+ def get_api_key_with_prefix(self, identifier):
+ """Gets API key (with prefix if set).
+
+ :param identifier: The identifier of apiKey.
+ :return: The token for api key authentication.
+ """
+ if self.refresh_api_key_hook is not None:
+ self.refresh_api_key_hook(self)
+ key = self.api_key.get(identifier)
+ if key:
+ prefix = self.api_key_prefix.get(identifier)
+ if prefix:
+ return "%s %s" % (prefix, key)
+ else:
+ return key
+
+ def get_basic_auth_token(self):
+ """Gets HTTP basic authentication header (string).
+
+ :return: The token for basic HTTP authentication.
+ """
+ username = ""
+ if self.username is not None:
+ username = self.username
+ password = ""
+ if self.password is not None:
+ password = self.password
+ return urllib3.util.make_headers(
+ basic_auth=username + ':' + password
+ ).get('authorization')
+
+ def auth_settings(self):
+ """Gets Auth Settings dict for api client.
+
+ :return: The Auth Settings information dict.
+ """
+ auth = {}
+ return auth
+
+ def to_debug_report(self):
+ """Gets the essential information for debugging.
+
+ :return: The report for debugging.
+ """
+ return "Python SDK Debug Report:\n"\
+ "OS: {env}\n"\
+ "Python Version: {pyversion}\n"\
+ "Version of the API: 0.1.0\n"\
+ "SDK Package Version: 1.0.0".\
+ format(env=sys.platform, pyversion=sys.version)
+
+ def get_host_settings(self):
+ """Gets an array of host settings
+
+ :return: An array of host settings
+ """
+ return [
+ {
+ 'url': "/api",
+ 'description': "SwaggerHub API Auto Mocking",
+ }
+ ]
+
+ def get_host_from_settings(self, index, variables=None):
+ """Gets host URL based on the index and variables
+ :param index: array index of the host settings
+ :param variables: hash of variable and the corresponding value
+ :return: URL based on host settings
+ """
+ variables = {} if variables is None else variables
+ servers = self.get_host_settings()
+
+ try:
+ server = servers[index]
+ except IndexError:
+ raise ValueError(
+ "Invalid index {0} when selecting the host settings. "
+ "Must be less than {1}".format(index, len(servers)))
+
+ url = server['url']
+
+ # go through variables and replace placeholders
+ for variable_name, variable in server['variables'].items():
+ used_value = variables.get(
+ variable_name, variable['default_value'])
+
+ if 'enum_values' in variable \
+ and used_value not in variable['enum_values']:
+ raise ValueError(
+ "The variable `{0}` in the host URL has invalid value "
+ "{1}. Must be {2}.".format(
+ variable_name, variables[variable_name],
+ variable['enum_values']))
+
+ url = url.replace("{" + variable_name + "}", used_value)
+
+ return url
diff --git a/client/cloudharness_cli/cloudharness_cli/common/exceptions.py b/client/cloudharness_cli/cloudharness_cli/common/exceptions.py
new file mode 100644
index 00000000..0de59244
--- /dev/null
+++ b/client/cloudharness_cli/cloudharness_cli/common/exceptions.py
@@ -0,0 +1,120 @@
+# coding: utf-8
+
+"""
+ CH common service API
+
+ Cloud Harness Platform - Reference CH service API # noqa: E501
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import six
+
+
+class OpenApiException(Exception):
+ """The base exception class for all OpenAPIExceptions"""
+
+
+class ApiTypeError(OpenApiException, TypeError):
+ def __init__(self, msg, path_to_item=None, valid_classes=None,
+ key_type=None):
+ """ Raises an exception for TypeErrors
+
+ Args:
+ msg (str): the exception message
+
+ Keyword Args:
+            path_to_item (list): a list of keys and indices to get to the
+ current_item
+ None if unset
+ valid_classes (tuple): the primitive classes that current item
+ should be an instance of
+ None if unset
+ key_type (bool): False if our value is a value in a dict
+ True if it is a key in a dict
+ False if our item is an item in a list
+ None if unset
+ """
+ self.path_to_item = path_to_item
+ self.valid_classes = valid_classes
+ self.key_type = key_type
+ full_msg = msg
+ if path_to_item:
+ full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
+ super(ApiTypeError, self).__init__(full_msg)
+
+
+class ApiValueError(OpenApiException, ValueError):
+ def __init__(self, msg, path_to_item=None):
+ """
+ Args:
+ msg (str): the exception message
+
+ Keyword Args:
+            path_to_item (list): the path to the exception in the
+ received_data dict. None if unset
+ """
+
+ self.path_to_item = path_to_item
+ full_msg = msg
+ if path_to_item:
+ full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
+ super(ApiValueError, self).__init__(full_msg)
+
+
+class ApiKeyError(OpenApiException, KeyError):
+ def __init__(self, msg, path_to_item=None):
+ """
+ Args:
+ msg (str): the exception message
+
+ Keyword Args:
+            path_to_item (None/list): the path to the exception in the
+ received_data dict
+ """
+ self.path_to_item = path_to_item
+ full_msg = msg
+ if path_to_item:
+ full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
+ super(ApiKeyError, self).__init__(full_msg)
+
+
+class ApiException(OpenApiException):
+
+ def __init__(self, status=None, reason=None, http_resp=None):
+ if http_resp:
+ self.status = http_resp.status
+ self.reason = http_resp.reason
+ self.body = http_resp.data
+ self.headers = http_resp.getheaders()
+ else:
+ self.status = status
+ self.reason = reason
+ self.body = None
+ self.headers = None
+
+ def __str__(self):
+ """Custom error messages for exception"""
+ error_message = "({0})\n"\
+ "Reason: {1}\n".format(self.status, self.reason)
+ if self.headers:
+ error_message += "HTTP response headers: {0}\n".format(
+ self.headers)
+
+ if self.body:
+ error_message += "HTTP response body: {0}\n".format(self.body)
+
+ return error_message
+
+
+def render_path(path_to_item):
+ """Returns a string representation of a path"""
+ result = ""
+ for pth in path_to_item:
+ if isinstance(pth, six.integer_types):
+ result += "[{0}]".format(pth)
+ else:
+ result += "['{0}']".format(pth)
+ return result
diff --git a/client/cloudharness_cli/cloudharness_cli/common/models/__init__.py b/client/cloudharness_cli/cloudharness_cli/common/models/__init__.py
new file mode 100644
index 00000000..afe53a9e
--- /dev/null
+++ b/client/cloudharness_cli/cloudharness_cli/common/models/__init__.py
@@ -0,0 +1,16 @@
+# coding: utf-8
+
+# flake8: noqa
+"""
+ CH common service API
+
+ Cloud Harness Platform - Reference CH service API # noqa: E501
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+# import models into model package
diff --git a/client/cloudharness_cli/cloudharness_cli/common/rest.py b/client/cloudharness_cli/cloudharness_cli/common/rest.py
new file mode 100644
index 00000000..86b6724d
--- /dev/null
+++ b/client/cloudharness_cli/cloudharness_cli/common/rest.py
@@ -0,0 +1,296 @@
+# coding: utf-8
+
+"""
+ CH common service API
+
+ Cloud Harness Platform - Reference CH service API # noqa: E501
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import io
+import json
+import logging
+import re
+import ssl
+
+import certifi
+# python 2 and python 3 compatibility library
+import six
+from six.moves.urllib.parse import urlencode
+import urllib3
+
+from cloudharness_cli.common.exceptions import ApiException, ApiValueError
+
+
+logger = logging.getLogger(__name__)
+
+
+class RESTResponse(io.IOBase):
+
+ def __init__(self, resp):
+ self.urllib3_response = resp
+ self.status = resp.status
+ self.reason = resp.reason
+ self.data = resp.data
+
+ def getheaders(self):
+ """Returns a dictionary of the response headers."""
+ return self.urllib3_response.getheaders()
+
+ def getheader(self, name, default=None):
+ """Returns a given response header."""
+ return self.urllib3_response.getheader(name, default)
+
+
+class RESTClientObject(object):
+
+ def __init__(self, configuration, pools_size=4, maxsize=None):
+ # urllib3.PoolManager will pass all kw parameters to connectionpool
+ # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
+ # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
+ # maxsize is the number of requests to host that are allowed in parallel # noqa: E501
+ # Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
+
+ # cert_reqs
+ if configuration.verify_ssl:
+ cert_reqs = ssl.CERT_REQUIRED
+ else:
+ cert_reqs = ssl.CERT_NONE
+
+ # ca_certs
+ if configuration.ssl_ca_cert:
+ ca_certs = configuration.ssl_ca_cert
+ else:
+ # if not set certificate file, use Mozilla's root certificates.
+ ca_certs = certifi.where()
+
+ addition_pool_args = {}
+ if configuration.assert_hostname is not None:
+ addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
+
+ if configuration.retries is not None:
+ addition_pool_args['retries'] = configuration.retries
+
+ if maxsize is None:
+ if configuration.connection_pool_maxsize is not None:
+ maxsize = configuration.connection_pool_maxsize
+ else:
+ maxsize = 4
+
+ # https pool manager
+ if configuration.proxy:
+ self.pool_manager = urllib3.ProxyManager(
+ num_pools=pools_size,
+ maxsize=maxsize,
+ cert_reqs=cert_reqs,
+ ca_certs=ca_certs,
+ cert_file=configuration.cert_file,
+ key_file=configuration.key_file,
+ proxy_url=configuration.proxy,
+ proxy_headers=configuration.proxy_headers,
+ **addition_pool_args
+ )
+ else:
+ self.pool_manager = urllib3.PoolManager(
+ num_pools=pools_size,
+ maxsize=maxsize,
+ cert_reqs=cert_reqs,
+ ca_certs=ca_certs,
+ cert_file=configuration.cert_file,
+ key_file=configuration.key_file,
+ **addition_pool_args
+ )
+
+ def request(self, method, url, query_params=None, headers=None,
+ body=None, post_params=None, _preload_content=True,
+ _request_timeout=None):
+ """Perform requests.
+
+ :param method: http request method
+ :param url: http request url
+ :param query_params: query parameters in the url
+ :param headers: http request headers
+ :param body: request json body, for `application/json`
+ :param post_params: request post parameters,
+ `application/x-www-form-urlencoded`
+ and `multipart/form-data`
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ """
+ method = method.upper()
+ assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
+ 'PATCH', 'OPTIONS']
+
+ if post_params and body:
+ raise ApiValueError(
+ "body parameter cannot be used with post_params parameter."
+ )
+
+ post_params = post_params or {}
+ headers = headers or {}
+
+ timeout = None
+ if _request_timeout:
+ if isinstance(_request_timeout, (int, ) if six.PY3 else (int, long)): # noqa: E501,F821
+ timeout = urllib3.Timeout(total=_request_timeout)
+ elif (isinstance(_request_timeout, tuple) and
+ len(_request_timeout) == 2):
+ timeout = urllib3.Timeout(
+ connect=_request_timeout[0], read=_request_timeout[1])
+
+ if 'Content-Type' not in headers:
+ headers['Content-Type'] = 'application/json'
+
+ try:
+ # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
+ if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
+ if query_params:
+ url += '?' + urlencode(query_params)
+ if re.search('json', headers['Content-Type'], re.IGNORECASE):
+ request_body = None
+ if body is not None:
+ request_body = json.dumps(body)
+ r = self.pool_manager.request(
+ method, url,
+ body=request_body,
+ preload_content=_preload_content,
+ timeout=timeout,
+ headers=headers)
+ elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
+ r = self.pool_manager.request(
+ method, url,
+ fields=post_params,
+ encode_multipart=False,
+ preload_content=_preload_content,
+ timeout=timeout,
+ headers=headers)
+ elif headers['Content-Type'] == 'multipart/form-data':
+                    # must del headers['Content-Type'], or the correct
+                    # Content-Type generated by urllib3 will be
+                    # overwritten.
+ del headers['Content-Type']
+ r = self.pool_manager.request(
+ method, url,
+ fields=post_params,
+ encode_multipart=True,
+ preload_content=_preload_content,
+ timeout=timeout,
+ headers=headers)
+ # Pass a `string` parameter directly in the body to support
+ # other content types than Json when `body` argument is
+ # provided in serialized form
+ elif isinstance(body, str) or isinstance(body, bytes):
+ request_body = body
+ r = self.pool_manager.request(
+ method, url,
+ body=request_body,
+ preload_content=_preload_content,
+ timeout=timeout,
+ headers=headers)
+ else:
+ # Cannot generate the request from given parameters
+ msg = """Cannot prepare a request message for provided
+ arguments. Please check that your arguments match
+ declared content type."""
+ raise ApiException(status=0, reason=msg)
+ # For `GET`, `HEAD`
+ else:
+ r = self.pool_manager.request(method, url,
+ fields=query_params,
+ preload_content=_preload_content,
+ timeout=timeout,
+ headers=headers)
+ except urllib3.exceptions.SSLError as e:
+ msg = "{0}\n{1}".format(type(e).__name__, str(e))
+ raise ApiException(status=0, reason=msg)
+
+ if _preload_content:
+ r = RESTResponse(r)
+
+            # In Python 3, response.data is bytes;
+            # we need to decode it to a string.
+ if six.PY3:
+ r.data = r.data.decode('utf8')
+
+ # log response body
+ logger.debug("response body: %s", r.data)
+
+ if not 200 <= r.status <= 299:
+ raise ApiException(http_resp=r)
+
+ return r
+
+ def GET(self, url, headers=None, query_params=None, _preload_content=True,
+ _request_timeout=None):
+ return self.request("GET", url,
+ headers=headers,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ query_params=query_params)
+
+ def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
+ _request_timeout=None):
+ return self.request("HEAD", url,
+ headers=headers,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ query_params=query_params)
+
+ def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
+ body=None, _preload_content=True, _request_timeout=None):
+ return self.request("OPTIONS", url,
+ headers=headers,
+ query_params=query_params,
+ post_params=post_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+
+ def DELETE(self, url, headers=None, query_params=None, body=None,
+ _preload_content=True, _request_timeout=None):
+ return self.request("DELETE", url,
+ headers=headers,
+ query_params=query_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+
+ def POST(self, url, headers=None, query_params=None, post_params=None,
+ body=None, _preload_content=True, _request_timeout=None):
+ return self.request("POST", url,
+ headers=headers,
+ query_params=query_params,
+ post_params=post_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+
+ def PUT(self, url, headers=None, query_params=None, post_params=None,
+ body=None, _preload_content=True, _request_timeout=None):
+ return self.request("PUT", url,
+ headers=headers,
+ query_params=query_params,
+ post_params=post_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+
+ def PATCH(self, url, headers=None, query_params=None, post_params=None,
+ body=None, _preload_content=True, _request_timeout=None):
+ return self.request("PATCH", url,
+ headers=headers,
+ query_params=query_params,
+ post_params=post_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
diff --git a/client/cloudharness_cli/cloudharness_cli/samples/api/workflows_api.py b/client/cloudharness_cli/cloudharness_cli/samples/api/workflows_api.py
index cedd3219..d240c2f0 100644
--- a/client/cloudharness_cli/cloudharness_cli/samples/api/workflows_api.py
+++ b/client/cloudharness_cli/cloudharness_cli/samples/api/workflows_api.py
@@ -37,6 +37,109 @@ def __init__(self, api_client=None):
api_client = ApiClient()
self.api_client = api_client
+ def error(self, **kwargs): # noqa: E501
+ """test sentry is working # noqa: E501
+
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.error(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.error_with_http_info(**kwargs) # noqa: E501
+
+ def error_with_http_info(self, **kwargs): # noqa: E501
+ """test sentry is working # noqa: E501
+
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.error_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+        :param _return_http_data_only: response data without the HTTP status
+                                       code and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method error" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = [] # noqa: E501
+
+ return self.api_client.call_api(
+ '/error', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='str', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def submit_async(self, **kwargs): # noqa: E501
"""Send an asynchronous operation # noqa: E501
diff --git a/client/cloudharness_cli/docs/common/SentryApi.md b/client/cloudharness_cli/docs/common/SentryApi.md
new file mode 100644
index 00000000..be98af1c
--- /dev/null
+++ b/client/cloudharness_cli/docs/common/SentryApi.md
@@ -0,0 +1,65 @@
+# cloudharness_cli.common.SentryApi
+
+All URIs are relative to *http://localhost/api*
+
+Method | HTTP request | Description
+------------- | ------------- | -------------
+[**getdsn**](SentryApi.md#getdsn) | **GET** /sentry/getdsn/{appname} | Gets the Sentry DSN for a given application
+
+
+# **getdsn**
+> str getdsn(appname)
+
+Gets the Sentry DSN for a given application
+
+Gets the Sentry DSN for a given application
+
+### Example
+
+```python
+from __future__ import print_function
+import time
+import cloudharness_cli.common
+from cloudharness_cli.common.rest import ApiException
+from pprint import pprint
+
+# Enter a context with an instance of the API client
+with cloudharness_cli.common.ApiClient() as api_client:
+ # Create an instance of the API class
+ api_instance = cloudharness_cli.common.SentryApi(api_client)
+ appname = 'appname_example' # str |
+
+ try:
+ # Gets the Sentry DSN for a given application
+ api_response = api_instance.getdsn(appname)
+ pprint(api_response)
+ except ApiException as e:
+ print("Exception when calling SentryApi->getdsn: %s\n" % e)
+```
+
+### Parameters
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+ **appname** | **str**| |
+
+### Return type
+
+**str**
+
+### Authorization
+
+No authorization required
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json
+
+### HTTP response details
+| Status code | Description | Response headers |
+|-------------|-------------|------------------|
+**200** | Sentry DSN for the given application | - |
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
diff --git a/client/cloudharness_cli/docs/samples/WorkflowsApi.md b/client/cloudharness_cli/docs/samples/WorkflowsApi.md
index 45b2dcc3..0d7cb1db 100644
--- a/client/cloudharness_cli/docs/samples/WorkflowsApi.md
+++ b/client/cloudharness_cli/docs/samples/WorkflowsApi.md
@@ -4,11 +4,63 @@ All URIs are relative to *https://samples.cloudharness.metacell.us/api*
Method | HTTP request | Description
------------- | ------------- | -------------
+[**error**](WorkflowsApi.md#error) | **GET** /error | test sentry is working
[**submit_async**](WorkflowsApi.md#submit_async) | **GET** /operation_async | Send an asynchronous operation
[**submit_sync**](WorkflowsApi.md#submit_sync) | **GET** /operation_sync | Send a synchronous operation
[**submit_sync_with_results**](WorkflowsApi.md#submit_sync_with_results) | **GET** /operation_sync_results | Send a synchronous operation and get results using the event queue. Just a sum, but in the cloud
+# **error**
+> str error()
+
+test sentry is working
+
+### Example
+
+```python
+from __future__ import print_function
+import time
+import cloudharness_cli.samples
+from cloudharness_cli.samples.rest import ApiException
+from pprint import pprint
+
+# Enter a context with an instance of the API client
+with cloudharness_cli.samples.ApiClient() as api_client:
+ # Create an instance of the API class
+ api_instance = cloudharness_cli.samples.WorkflowsApi(api_client)
+
+ try:
+ # test sentry is working
+ api_response = api_instance.error()
+ pprint(api_response)
+ except ApiException as e:
+ print("Exception when calling WorkflowsApi->error: %s\n" % e)
+```
+
+### Parameters
+This endpoint does not need any parameter.
+
+### Return type
+
+**str**
+
+### Authorization
+
+No authorization required
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json
+
+### HTTP response details
+| Status code | Description | Response headers |
+|-------------|-------------|------------------|
+**500** | Sentry entry should come! | - |
+**200** | This won't happen | - |
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
# **submit_async**
> InlineResponse202 submit_async()
diff --git a/client/cloudharness_cli/test/common/__init__.py b/client/cloudharness_cli/test/common/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/client/cloudharness_cli/test/common/test_sentry_api.py b/client/cloudharness_cli/test/common/test_sentry_api.py
new file mode 100644
index 00000000..57a9bb24
--- /dev/null
+++ b/client/cloudharness_cli/test/common/test_sentry_api.py
@@ -0,0 +1,40 @@
+# coding: utf-8
+
+"""
+ CH common service API
+
+ Cloud Harness Platform - Reference CH service API # noqa: E501
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import unittest
+
+import cloudharness_cli.common
+from cloudharness_cli.common.api.sentry_api import SentryApi # noqa: E501
+from cloudharness_cli.common.rest import ApiException
+
+
+class TestSentryApi(unittest.TestCase):
+ """SentryApi unit test stubs"""
+
+ def setUp(self):
+ self.api = cloudharness_cli.common.api.sentry_api.SentryApi() # noqa: E501
+
+ def tearDown(self):
+ pass
+
+ def test_getdsn(self):
+ """Test case for getdsn
+
+ Gets the Sentry DSN for a given application # noqa: E501
+ """
+ pass
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/client/cloudharness_cli/test/samples/test_workflows_api.py b/client/cloudharness_cli/test/samples/test_workflows_api.py
index 944a51ad..71e95c81 100644
--- a/client/cloudharness_cli/test/samples/test_workflows_api.py
+++ b/client/cloudharness_cli/test/samples/test_workflows_api.py
@@ -29,6 +29,13 @@ def setUp(self):
def tearDown(self):
pass
+ def test_error(self):
+ """Test case for error
+
+ test sentry is working # noqa: E501
+ """
+ pass
+
def test_submit_async(self):
"""Test case for submit_async
diff --git a/cloudharness.png b/cloudharness.png
new file mode 100644
index 00000000..1966e6dd
Binary files /dev/null and b/cloudharness.png differ
diff --git a/infrastructure/base-images/cloudharness-base/Dockerfile b/infrastructure/base-images/cloudharness-base/Dockerfile
index 591e0bea..582cdad9 100644
--- a/infrastructure/base-images/cloudharness-base/Dockerfile
+++ b/infrastructure/base-images/cloudharness-base/Dockerfile
@@ -3,10 +3,16 @@ FROM ${PARENT}
RUN apk update
RUN apk upgrade
+
+# Add bash for k8s console
RUN apk add bash
+# dev tools needed by some python libraries
+RUN apk add gcc libc-dev g++ python3-dev libffi-dev openssl-dev
COPY libraries/cloudharness-common /libraries/cloudharness-common
+COPY client/cloudharness_cli /client/cloudharness_cli
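+# Install the common library and the generated API client so that derived application images can import them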
RUN pip install /libraries/cloudharness-common
+RUN pip install /client/cloudharness_cli
WORKDIR /
\ No newline at end of file
diff --git a/libraries/cloudharness-common/MANIFEST.in b/libraries/cloudharness-common/MANIFEST.in
index 6021ca4c..c9cb6eb9 100644
--- a/libraries/cloudharness-common/MANIFEST.in
+++ b/libraries/cloudharness-common/MANIFEST.in
@@ -1 +1,2 @@
-include cloudharness/utils/resources/*
\ No newline at end of file
+include cloudharness/utils/resources/*
+include cloudharness/service/templates/*
\ No newline at end of file
diff --git a/libraries/cloudharness-common/README.md b/libraries/cloudharness-common/README.md
index e901157d..cf1a23ab 100644
--- a/libraries/cloudharness-common/README.md
+++ b/libraries/cloudharness-common/README.md
@@ -10,7 +10,7 @@ In order to use all `cloudharness` functionalities inside the cluster you must
 define your Dockerfile depending on the cloudharness base image as follows:
```Dockerfile
-ARG REGISTRY=r.cfcr.io/tarelli/
+ARG REGISTRY
ARG TAG=latest
FROM ${REGISTRY}cloudharness-base:${TAG}
```
diff --git a/libraries/cloudharness-common/cloudharness/__init__.py b/libraries/cloudharness-common/cloudharness/__init__.py
index 1421bd3c..438eae54 100644
--- a/libraries/cloudharness-common/cloudharness/__init__.py
+++ b/libraries/cloudharness-common/cloudharness/__init__.py
@@ -9,8 +9,29 @@ def set_debug():
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
+# TODO log will write through a rest service
-__all__ = ['log']
+class NotCorrectlyInitialized(Exception):
+ pass
-# TODO log will write through a rest service
+def init(appname: str):
+ """
+ Init cloudharness functionality for the current app
+
+ Args:
+ appname: the slug of the application
+
+ Usage examples:
+ import cloudharness
+ cloudharness.init('workspaces')
+ """
+ if not appname:
+ raise NotCorrectlyInitialized
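+    # Sentry initialization is best-effort: failures are logged as warnings and never block application startup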
+ try:
+ import cloudharness.sentry
+ sentry.init(appname)
+ except Exception as e:
+ log.warning(f'Error enabling Sentry for {appname}', exc_info=True)
+
+__all__ = ['log', 'init']
diff --git a/libraries/cloudharness-common/cloudharness/applications.py b/libraries/cloudharness-common/cloudharness/applications.py
new file mode 100644
index 00000000..7e9c1286
--- /dev/null
+++ b/libraries/cloudharness-common/cloudharness/applications.py
@@ -0,0 +1,42 @@
+from cloudharness.utils.config import CloudharnessConfig, ConfigObject
+
+
+class ApplicationConfiguration:
+
+ def __init__(self, conf_dict_or_config_object):
+ if isinstance(conf_dict_or_config_object, dict):
+ self.conf = ConfigObject(conf_dict_or_config_object)
+ else:
+ self.conf = conf_dict_or_config_object
+ self.name = self.conf.name
+ self.harness = self.conf.harness
+
+ def __getitem__(self, key_or_path):
+ item = self.conf[key_or_path]
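+        # If the value is itself an application block (it carries a 'harness' section),
+        # wrap it so nested lookups keep the same convenience accessors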
+ if (isinstance(item, ConfigObject) or isinstance(item, dict)) and item['harness']:
+ item = ApplicationConfiguration(item)
+ return item
+
+ def __getattr__(self, item):
+ return self[item]
+
+ def is_auto_service(self):
+ return self['harness.service.auto']
+
+ def is_auto_deployment(self):
+ return self['harness.deployment.auto']
+
+ def is_sentry_enabled(self):
+ return self['harness.sentry']
+
+
+def get_configurations(**kwargs):
+ return [ApplicationConfiguration(conf) for conf in CloudharnessConfig.get_application_by_filter(**kwargs)]
+
+
+def get_configuration(app_name):
+ conf = CloudharnessConfig.get_application_by_filter(name=app_name)
+ if conf:
+ if len(conf) > 1:
+ raise Exception(f'More than one app with the same name is not allowed. Found {len(conf)} applications with name {app_name}')
+ return ApplicationConfiguration(conf[0])
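+
+
+# Usage sketch (assumes an application named 'samples' is defined in the deployment values):
+#
+#   from cloudharness.applications import get_configuration
+#   app_conf = get_configuration('samples')
+#   if app_conf and app_conf.is_sentry_enabled():
+#       ...  # e.g. initialize Sentry for this application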
diff --git a/libraries/cloudharness-common/cloudharness/auth/keycloak/__init__.py b/libraries/cloudharness-common/cloudharness/auth/keycloak/__init__.py
index dd5cac4f..7d454a94 100644
--- a/libraries/cloudharness-common/cloudharness/auth/keycloak/__init__.py
+++ b/libraries/cloudharness-common/cloudharness/auth/keycloak/__init__.py
@@ -5,9 +5,34 @@
import requests
from urllib.parse import urljoin
from typing import List
-from flask import current_app
+from flask import current_app, request
+from keycloak import KeycloakAdmin
+from keycloak.exceptions import KeycloakAuthenticationError
+
from cloudharness.utils import env
+try:
+ from cloudharness.utils.config import CloudharnessConfig as conf
+ accounts_app = conf.get_application_by_filter(name='accounts')[0]
+ AUTH_REALM = env.get_auth_realm()
+ SCHEMA = 'http'
+ HOST = getattr(accounts_app,'subdomain')
+ PORT = getattr(accounts_app,'port')
+ USER = getattr(accounts_app.admin,'user')
+ PASSWD = getattr(accounts_app.admin,'pass')
+except:
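+    # CloudHarness configuration is not available (e.g. when running outside the cluster): fall back to fixed defaults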
+ AUTH_REALM = 'mnp'
+ SCHEMA = 'https'
+ HOST = 'accounts.mnp.metacell.us'
+ PORT = '443'
+ USER = 'mnp'
+ PASSWD = 'metacell'
+
+SERVER_URL = f'{SCHEMA}://{HOST}:{PORT}/auth/'
+
def decode_token(token):
"""
Check and retrieve authentication information from custom bearer token.
@@ -19,22 +44,339 @@ def decode_token(token):
:return: Decoded token information or None if token is invalid
:rtype: dict | None
"""
- SCHEMA = 'https://'
- AUTH_DOMAIN = env.get_auth_service_cluster_address()
- AUTH_REALM = env.get_auth_realm()
- BASE_PATH = f"//{os.path.join(AUTH_DOMAIN, 'auth/realms', AUTH_REALM)}"
- AUTH_PUBLIC_KEY_URL = urljoin(SCHEMA, BASE_PATH)
- KEY = json.loads(requests.get(AUTH_PUBLIC_KEY_URL, verify=False).text)['public_key']
+ decoded = AuthClient.decode_token(token)
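+    # a token is considered valid when it carries the 'offline_access' realm role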
+ valid = 'offline_access' in decoded['realm_access']['roles']
+ current_app.logger.debug(valid)
+ return {'uid': 'user_id'}
- KEY = f"-----BEGIN PUBLIC KEY-----\n{KEY}\n-----END PUBLIC KEY-----"
- try:
- decoded = jwt.decode(token, KEY, audience='accounts', algorithms='RS256')
- except:
- current_app.logger.debug(f"Error validating user: {sys.exc_info()}")
- return None
+class AuthClient():
- valid = 'offline_access' in decoded['realm_access']['roles']
- current_app.logger.debug(valid)
- return {'uid': 'user_id'}
\ No newline at end of file
+ @staticmethod
+ def _get_keycloak_user_id():
+ bearer = request.headers.get('Authorization', None)
+ current_app.logger.debug(f'Bearer: {bearer}')
+ if not bearer or bearer == 'Bearer undefined':
+ if current_app.config['ENV'] == 'development':
+ # when development and not using KeyCloak (no current user),
+ # get id from X-Current-User-Id header
+ keycloak_user_id = request.headers.get("X-Current-User-Id", "-1")
+ else:
+ keycloak_user_id = "-1" # No authorization --> no user
+ else:
+ token = bearer.split(' ')[1]
+ keycloak_user_id = AuthClient.decode_token(token)['sub']
+ return keycloak_user_id
+
+ def __init__(self):
+ """
+        Initialize the class and check connectivity to the Keycloak server
+ """
+ # test if we can connect to the Keycloak server
+ dummy_client = self.get_admin_client()
+
+ def get_admin_client(self):
+ """
+        Set up and return a Keycloak admin client
+
+        The client authenticates against the 'master' realm with the default admin
+        credentials and uses the application realm for read/write operations on the
+        Keycloak server.
+
+ :return: KeycloakAdmin
+ """
+ if not getattr(self, "_admin_client", None):
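+            # lazily create the admin client on first use and cache it on the instance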
+ self._admin_client = KeycloakAdmin(
+ server_url=SERVER_URL,
+ username=USER,
+ password=PASSWD,
+ realm_name=AUTH_REALM,
+ user_realm_name='master',
+ verify=True)
+ try:
+                # test if the connection is still authenticated; if not, refresh the token
+ dummy = self._admin_client.get_realms()
+ except KeycloakAuthenticationError:
+ self._admin_client.refresh_token()
+ return self._admin_client
+
+ @staticmethod
+ def decode_token(token):
+ """
+ Check and retrieve authentication information from custom bearer token.
+ Returned value will be passed in 'token_info' parameter of your operation function, if there is one.
+ 'sub' or 'uid' will be set in 'user' parameter of your operation function, if there is one.
+
+ :param token Token provided by Authorization header
+ :type token: str
+ :return: Decoded token information or None if token is invalid
+ :rtype: dict | None
+ """
+ AUTH_PUBLIC_KEY_URL = f'{SERVER_URL}realms/{AUTH_REALM}'
+
+ KEY = json.loads(requests.get(AUTH_PUBLIC_KEY_URL, verify=False).text)['public_key']
+ KEY = b"-----BEGIN PUBLIC KEY-----\n" + str.encode(KEY) + b"\n-----END PUBLIC KEY-----"
+
+ decoded = jwt.decode(token, KEY, algorithms='RS256', audience='account')
+ return decoded
+
+ def get_client(self, client_name):
+ """
+ Return the KC client
+
+ ClientRepresentation
+ https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_clientrepresentation
+
+ :param client_name: Name of the client to retrieve
+ :return: ClientRepresentation or False when not found
+ """
+ admin_client = self.get_admin_client()
+ try:
+ client_id = admin_client.get_client_id(client_name)
+ client = admin_client.get_client(client_id)
+ except:
+ return False
+ return client
+
+ def create_client(self,
+ client_name,
+ protocol="openid-connect",
+ enabled=True,
+ public=True,
+ standard_flow_enabled=True,
+ direct_access_grants_enable=True,
+ redirect_uris=["*"],
+ web_origins=["*","+"]):
+ """
+ Creates a new KC client
+
+ :param client_name: Name of the client
+ :param protocol: defaults to openid-connect
+ :param enabled: defaults to True
+ :param public: defaults to True
+ :param standard_flow_enabled: defaults to True
+ :param direct_access_grants_enable: defaults to True
+ :param redirect_uris: defaults to ["*"],
+ :param web_origins: defaults to ["*","+"]
+ :return: True on success or exception
+ """
+ admin_client = self.get_admin_client()
+ admin_client.create_client({
+ 'id': client_name,
+ 'name': client_name,
+ 'protocol': protocol,
+ 'enabled': enabled,
+ 'publicClient': public,
+ 'standardFlowEnabled': standard_flow_enabled,
+ 'directAccessGrantsEnabled': direct_access_grants_enable,
+ 'redirectUris': redirect_uris,
+ 'webOrigins': web_origins
+ })
+ return True
+
+ def create_client_role(self, client_id, role):
+ """
+ Creates a new client role if it does not exist yet
+
+ :param client_id: the id of the client under which the role will be created
+ :param role: the name of the client role
+ :return: True on success, False on error
+ """
+ admin_client = self.get_admin_client()
+ try:
+ admin_client.create_client_role(
+ client_id,
+ {
+ 'name': role,
+ 'clientRole': True
+ }
+ )
+ except:
+ return False
+ return True
+
+ def get_group(self, group_id, with_members=False):
+ """
+ Return the group in the application realm
+
+ GroupRepresentation
+ https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_grouprepresentation
+
+ :param with_members: If set, the members (users) of the group are added to the group. Defaults to False
+ :return: GroupRepresentation + UserRepresentation
+ """
+ admin_client = self.get_admin_client()
+ group = admin_client.get_group(group_id)
+ if with_members:
+ members = admin_client.get_group_members(group_id)
+ group.update({'members': members})
+ return group
+
+ def get_groups(self, with_members=False):
+ """
+ Return a list of all groups in the application realm
+
+ GroupRepresentation
+ https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_grouprepresentation
+
+ :param with_members: If set, the members (users) of the group(s) are added to the group. Defaults to False
+ :return: List(GroupRepresentation)
+ """
+ admin_client = self.get_admin_client()
+ groups = []
+ for group in admin_client.get_groups():
+ groups.append(self.get_group(group['id'], with_members))
+ return groups
+
+ def get_users(self):
+ """
+ Return a list of all users in the application realm
+
+ UserRepresentation
+ https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_userrepresentation
+
+ GroupRepresentation
+ https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_grouprepresentation
+
+ :return: List(UserRepresentation + GroupRepresentation)
+ """
+ admin_client = self.get_admin_client()
+ users = []
+ for user in admin_client.get_users():
+ user.update({'userGroups': admin_client.get_user_groups(user['id'])})
+ users.append(user)
+ return users
+
+ def get_user(self, user_id):
+ """
+ Get the user including the user groups
+
+ :param user_id: User id
+
+ UserRepresentation
+ https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_userrepresentation
+
+ GroupRepresentation
+ https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_grouprepresentation
+
+ :return: UserRepresentation + GroupRepresentation
+ """
+ admin_client = self.get_admin_client()
+ user = admin_client.get_user(user_id)
+ user.update({'userGroups': admin_client.get_user_groups(user_id)})
+ return user
+
+ def get_current_user(self):
+ """
+ Get the current user including the user groups
+
+ UserRepresentation
+ https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_userrepresentation
+
+ GroupRepresentation
+ https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_grouprepresentation
+
+ :return: UserRepresentation + GroupRepresentation
+ """
+ return self.get_user(self._get_keycloak_user_id())
+
+ def get_user_client_roles(self, user_id, client_name):
+ """
+ Get the client roles of the user
+
+ :param user_id: User id
+ :param client_name: Client name
+ :return: (array RoleRepresentation)
+ """
+ admin_client = self.get_admin_client()
+ client_id = admin_client.get_client_id(client_name)
+ return admin_client.get_client_roles_of_user(user_id, client_id)
+
+ def get_current_user_client_roles(self, client_name):
+ """
+ Get the client roles of the current user
+
+ :param client_name: Client name
+ :return: (array RoleRepresentation)
+ """
+ cur_user_id = self._get_keycloak_user_id()
+ return self.get_user_client_roles(cur_user_id, client_name)
+
+ def user_has_client_role(self, user_id, client_name, role):
+ """
+ Tests if the user has the given role within the given client
+
+ :param user_id: User id
+ :param client_name: Name of the client
+ :param role: Name of the role
+ :return: boolean, True when the user has the role
+ """
+ roles = [user_client_role for user_client_role in self.get_user_client_roles(user_id, client_name) if user_client_role['name'] == role]
+ return roles != []
+
+ def current_user_has_client_role(self, client_name, role):
+ """
+ Tests if the current user has the given role within the given client
+
+ :param client_name: Name of the client
+ :param role: Name of the role
+ :return: boolean, True when the current user has the role
+ """
+ return self.user_has_client_role(
+ self._get_keycloak_user_id(),
+ client_name,
+ role)
+
+ def get_client_role_members(self, client_name, role):
+ """
+ Get all users for the specified client and role
+
+ :param client_name: Client name
+ :param role: Role name
+ :return: List(UserRepresentation)
+ """
+ admin_client = self.get_admin_client()
+ client_id = admin_client.get_client_id(client_name)
+ return admin_client.get_client_role_members(client_id, role)
+
+ def user_add_update_attribute(self, user_id, attribute_name, attribute_value):
+ """
+ Adds the attribute to the user, or updates it when the attribute already exists
+
+ :param user_id: id of the user
+ :param attribute_name: name of the attribute to add/update
+ :param attribute_value: value of the attribute
+ :return: boolean True on success
+ """
+ admin_client = self.get_admin_client()
+ user = self.get_user(user_id)
+ attributes = user.get('attributes', {})
+ attributes[attribute_name] = attribute_value
+ admin_client.update_user(
+ user_id,
+ {
+ 'attributes': attributes
+ })
+ return True
+
+ def user_delete_attribute(self, user_id, attribute_name):
+ """
+ Deletes the attribute from the user
+
+ :param user_id: id of the user
+ :param attribute_name: name of the attribute to delete
+ :return: boolean True on success, False if the attribute is not in the user's attributes
+ """
+ admin_client = self.get_admin_client()
+ user = self.get_user(user_id)
+ attributes = user.get('attributes', None)
+ if attributes and attribute_name in attributes:
+ del attributes[attribute_name]
+ admin_client.update_user(
+ user_id,
+ {
+ 'attributes': attributes
+ })
+ return True
+ return False
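A minimal usage sketch of the new `AuthClient`; the import path below is an assumption (the file header for this hunk is not shown here), and the calls must run inside a Flask request context with a reachable Keycloak server. Client and role names are taken from the default values used elsewhere in this change set.

```python
# Sketch only: import path, client name and role name are assumptions for illustration.
from cloudharness.auth.keycloak import AuthClient

client = AuthClient()                        # verifies connectivity to Keycloak on init
user = client.get_current_user()             # UserRepresentation + 'userGroups'
if client.current_user_has_client_role('web-client', 'administrator'):
    client.user_add_update_attribute(user['id'], 'quota', '10Gi')
```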
diff --git a/libraries/cloudharness-common/cloudharness/events/client.py b/libraries/cloudharness-common/cloudharness/events/client.py
index 5096a5ee..6bf05b99 100644
--- a/libraries/cloudharness-common/cloudharness/events/client.py
+++ b/libraries/cloudharness-common/cloudharness/events/client.py
@@ -1,5 +1,9 @@
import os
import sys
+import threading
+import time
+import traceback
+
from time import sleep
from json import dumps, loads
from kafka import KafkaProducer, KafkaConsumer
@@ -17,6 +21,15 @@ def __init__(self, topic_id):
self.topic_id = topic_id
self.service = env.get_cloudharness_events_service()
+ def _get_consumer(self, group_id='default') -> KafkaConsumer:
+ return KafkaConsumer(self.topic_id,
+ bootstrap_servers=self.service,
+ auto_offset_reset='earliest',
+ enable_auto_commit=True,
+ group_id=group_id,
+ value_deserializer=lambda x: loads(x.decode('utf-8')))
+
+
def create_topic(self):
""" Connects to cloudharness Events and creates a new topic
Return:
@@ -61,12 +74,7 @@ def produce(self, message: dict):
def consume_all(self, group_id='default') -> list:
''' Return a list of messages published in the topic '''
- consumer = KafkaConsumer(self.topic_id,
- bootstrap_servers=self.service,
- auto_offset_reset='earliest',
- enable_auto_commit=True,
- group_id=group_id,
- value_deserializer=lambda x: loads(x.decode('utf-8')))
+ consumer = self._get_consumer(group_id)
try:
for topic in consumer.poll(10000).values():
return [record.value for record in topic]
@@ -94,11 +102,46 @@ def delete_topic(self) -> bool:
log.error(f"Ups... We had an error deleteing the Topic {self.topic_id} --> {e}")
raise EventGeneralException from e
+ def close(self):
+ if getattr(self, '_consumer_thread', None):
+ self._consumer_thread.cancel()
+
+ def _consume_task(self, app=None, group_id=None, handler=None):
+ log.info(f'Kafka consumer thread started, listening for messages in queue: {self.topic_id}')
+ while True:
+ try:
+ consumer = self._get_consumer(group_id)
+ for message in consumer:
+ try:
+ handler(app, message.value)
+ except Exception as e:
+ log.error(f"Ups... there was an error during execution of the consumer Topc {self.topic_id} --> {e}")
+ log.error(traceback.print_exc())
+ consumer.close()
+ except Exception as e:
+ log.error(f"Ups... there was an error during execution of the consumer Topc {self.topic_id} --> {e}")
+ log.error(traceback.print_exc())
+ time.sleep(10)
+
+ log.info(f'Kafka consumer thread {self.topic_id} stopped')
+
+ def async_consume(self, app=None, handler=None, group_id='default'):
+ log.debug('creating thread')
+ if app:
+ log.debug('get current object from app')
+ app = app._get_current_object()
+ self._consumer_thread = threading.Thread(
+ target=self._consume_task,
+ kwargs={'app': app,
+ 'group_id': group_id,
+ 'handler': handler})
+ self._consumer_thread.start()
+ log.debug('thread started')
if __name__ == "__main__":
# creat the required os env variables
- os.environ['CLOUDHARNESS_EVENTS_CLIENT_ID'] = 'my-client'
- os.environ['CLOUDHARNESS_EVENTS_SERVICE'] = 'bootstrap.cloudharness.svc.cluster.local:9092'
+ os.environ['CLOUDHARNESS_EVENTS_CLIENT_ID'] = env.get_cloudharness_events_client_id()
+ os.environ['CLOUDHARNESS_EVENTS_SERVICE'] = env.get_cloudharness_events_service()
# instantiate the client
client = EventClient('test-sync-op-results-qcwbc')
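A short sketch of the new asynchronous consumer added above; the topic name, group id and handler are illustrative.

```python
# Sketch only: topic and group names are illustrative.
from cloudharness.events.client import EventClient

def handle(app, message):
    # called for every message published on the topic; `message` is the deserialized JSON value
    print("received:", message)

client = EventClient('operation-results')
client.async_consume(handler=handle, group_id='my-service')
```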
diff --git a/libraries/cloudharness-common/cloudharness/sentry/__init__.py b/libraries/cloudharness-common/cloudharness/sentry/__init__.py
new file mode 100644
index 00000000..ead24fef
--- /dev/null
+++ b/libraries/cloudharness-common/cloudharness/sentry/__init__.py
@@ -0,0 +1,55 @@
+import json
+import requests
+
+from cloudharness.utils.env import get_common_service_cluster_address
+
+def get_dsn(appname):
+ """
+ Helper function for getting the Sentry DSN of the project of the application
+ If the application has no project in Sentry, the project will be created and
+ linked to the default Sentry organisation and team
+
+ Args:
+ appname: the slug of the application
+
+ Returns:
+ Sentry DSN
+
+ Usage examples:
+ from cloudharness.sentry import get_dsn
+ dsn = get_dsn('workspaces')
+ """
+ url = get_common_service_cluster_address() + f'/api/sentry/getdsn/{appname}'
+ response = requests.get(url, verify=False).json()
+ dsn = response['dsn']
+ if dsn and len(dsn)>0:
+ return dsn
+ else:
+ return None
+
+def init(appname):
+ """
+ Init cloudharness Sentry functionality for the current app
+
+ Args:
+ appname: the slug of the application
+
+ Usage examples:
+ import cloudharness.sentry as sentry
+ sentry.init('workspaces')
+ """
+ dsn = get_dsn(appname)
+ if dsn:
+ import sentry_sdk
+ try:
+ from flask import current_app as app
+ from sentry_sdk.integrations.flask import FlaskIntegration
+ integrations = [FlaskIntegration()]
+ except:
+ integrations = []
+ sentry_sdk.init(
+ dsn=dsn,
+ integrations=integrations
+ )
+
+__all__ = ['get_dsn', 'init']
diff --git a/libraries/cloudharness-common/cloudharness/service/__init__.py b/libraries/cloudharness-common/cloudharness/service/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/libraries/cloudharness-common/cloudharness/service/pvc.py b/libraries/cloudharness-common/cloudharness/service/pvc.py
new file mode 100644
index 00000000..31d5022c
--- /dev/null
+++ b/libraries/cloudharness-common/cloudharness/service/pvc.py
@@ -0,0 +1,74 @@
+import os
+import kubernetes
+import yaml
+
+from cloudharness.utils.config import CloudharnessConfig as conf
+
+def _get_api():
+ try:
+ configuration = kubernetes.config.load_incluster_config()
+ except:
+ configuration = kubernetes.config.load_kube_config()
+ api_instance = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient(configuration))
+ return api_instance
+
+def create_persistent_volume_claim(name, size, logger, **kwargs):
+ """
+ Create a Persistent Volume Claim in the Kubernetes cluster.
+ If a PVC with the name given already exists then the function
+ will just return to the caller function.
+
+ Args:
+ name (string): the name of the PVC
+ size (string): the size of the PVC, e.g. 2Gi for a 2Gb PVC
+ logger (logger): the logger where the information message is sent to
+
+ Returns:
+ -
+ """
+ if not size:
+ raise Exception(f"Size must be set. Got {size!r}.")
+
+ if not persistent_volume_claim_exists(name):
+ path = os.path.join(os.path.dirname(__file__), 'templates', 'pvc.yaml')
+ tmpl = open(path, 'rt').read()
+ text = tmpl.format(name=name, size=size)
+ data = yaml.safe_load(text)
+
+ obj = _get_api().create_namespaced_persistent_volume_claim(
+ namespace=conf.get_configuration()['namespace'],
+ body=data,
+ )
+ logger.info(f"PVC child is created: %s", obj)
+
+def persistent_volume_claim_exists(name):
+ """
+ Check if the PVC with the given name already exists.
+
+ Args:
+ name (string): the name of the PVC
+
+ Returns:
+ boolean: True if the PVC exists, False if the PVC doesn't exist
+ """
+ if get_persistent_volume_claim(name):
+ return True
+ return False
+
+def get_persistent_volume_claim(name):
+ """
+ Get the Persistent Volume Claim with the given name from the Kubernetes
+ cluster.
+
+ Args:
+ name (string): the name of the PVC
+
+ Returns:
+ The PVC data (see https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
+ """
+ foundPVCs = _get_api().list_namespaced_persistent_volume_claim(
+ namespace=conf.get_configuration()['namespace'],
+ field_selector=f'metadata.name={name}')
+ if len(foundPVCs.items)>0:
+ return foundPVCs.items[0]
+ return None
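Sketch of how the new PVC helpers could be used; it assumes in-cluster credentials (or a local kubeconfig) and the CloudHarness configuration available to `CloudharnessConfig`. The PVC name and size are illustrative.

```python
# Sketch only: PVC name and size are illustrative.
import logging
from cloudharness.service.pvc import create_persistent_volume_claim, persistent_volume_claim_exists

log = logging.getLogger(__name__)
create_persistent_volume_claim(name='workspace-data', size='2Gi', logger=log)
assert persistent_volume_claim_exists('workspace-data')
```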
diff --git a/libraries/cloudharness-common/cloudharness/service/templates/pvc.yaml b/libraries/cloudharness-common/cloudharness/service/templates/pvc.yaml
new file mode 100644
index 00000000..a5debac6
--- /dev/null
+++ b/libraries/cloudharness-common/cloudharness/service/templates/pvc.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: "{name}"
+ annotations:
+ volume.beta.kubernetes.io/storage-class: standard
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "{size}"
diff --git a/libraries/cloudharness-common/cloudharness/utils/config.py b/libraries/cloudharness-common/cloudharness/utils/config.py
new file mode 100644
index 00000000..4ecc8dfd
--- /dev/null
+++ b/libraries/cloudharness-common/cloudharness/utils/config.py
@@ -0,0 +1,115 @@
+import yaml
+import os
+
+ALLVALUES_PATH = '/opt/cloudharness/resources/allvalues.yaml'
+
+
+class ConfigObject(object):
+ def __init__(self, dictionary):
+ self.conf = dictionary
+ for key, val in dictionary.items():
+ if isinstance(val, (list, tuple)):
+ setattr(self, key, [ConfigObject(x) if isinstance(x, dict) else x for x in val])
+ else:
+ setattr(self, key, ConfigObject(val) if isinstance(val, dict) else val)
+
+ def __getitem__(self, key_or_path):
+ obj = self.conf
+ for k in key_or_path.split('.'):
+ if not k in obj:
+ return None
+ obj = obj[k]
+ return obj
+
+class CloudharnessConfig:
+ """
+ Helper class for the Cloud Harness configuration
+
+ The CH configuration will be loaded from the values.yaml generated by helm
+ via the harness-deployment script
+
+ """
+ allvalues=None
+
+ @classmethod
+ def _get_all_values(cls):
+ if not cls.allvalues and os.path.exists(ALLVALUES_PATH):
+ with open(ALLVALUES_PATH) as f:
+ cls.allvalues = yaml.safe_load(f)
+ return cls.allvalues
+
+ @classmethod
+ def _get_apps(cls):
+ if not hasattr(cls, 'apps'):
+ cls.apps = ConfigObject(cls._get_all_values()['apps'])
+ return cls.apps
+
+ @classmethod
+ def get_application_by_filter(cls, **filter):
+ """
+ Helper function for filtering CH app objects
+
+ Args:
+ filter: the filter e.g. harness__deployment__auto=True
+
+ Returns:
+ list of app objects (see values.yaml for a detailed description)
+
+ Usage examples:
+ from cloudharness.utils.config import CloudharnessConfig as conf
+ conf.get_application_by_filter(harness__deployment__auto=True)
+ conf.get_application_by_filter(name='workflows')
+ """
+ apps = []
+ filter_keys = next(iter(filter)).split('__')
+ filter_value = next(iter(filter.values()))
+ all_apps = cls._get_apps()
+ for app_key in cls.get_applications():
+ app = getattr(all_apps, app_key)
+ tmp_obj = app
+ try:
+ for key in filter_keys:
+ tmp_obj = getattr(tmp_obj, key)
+ if (tmp_obj == filter_value) or \
+ (filter_value == False and tmp_obj is None) or \
+ (filter_value == True and tmp_obj is not None):
+ apps.append(app)
+ except AttributeError:
+ pass
+ return apps
+
+ @classmethod
+ def get_configuration(cls):
+ """
+ Helper function for getting all CH config values
+
+ Args:
+ -
+
+ Returns:
+ dictionary of allvalues.yaml (see values.yaml for a detailed description)
+
+ Usage examples:
+ from cloudharness.utils.config import CloudharnessConfig as conf
+ ch_conf = conf.get_configuration()
+ workflows = ch_conf['apps']['workflows']
+ """
+ return cls._get_all_values()
+
+ @classmethod
+ def get_applications(cls):
+ """
+ Helper function for getting all CH apps from allvalues.yaml
+
+ Args:
+ -
+
+ Returns:
+ dictionary of apps from allvalues.yaml (see values.yaml for a detailed description)
+
+ Usage examples:
+ from cloudharness.utils.config import CloudharnessConfig as conf
+ ch_apps = conf.get_applications()
+ workflows = ch_apps['workflows']
+ """
+ return cls.get_configuration()['apps']
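The docstrings above already show the filter syntax; this sketch additionally shows the dotted-path lookup provided by `ConfigObject.__getitem__`. It assumes `allvalues.yaml` is present and defines an `accounts` app with admin credentials, as used by the auth client earlier in this change set.

```python
# Sketch only: application and attribute names are illustrative.
from cloudharness.utils.config import CloudharnessConfig as conf

auto_deployed = conf.get_application_by_filter(harness__deployment__auto=True)

accounts = conf.get_application_by_filter(name='accounts')[0]
admin_user = accounts['admin.user']    # dotted-path lookup, equivalent to accounts.admin.user
```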
diff --git a/libraries/cloudharness-common/cloudharness/utils/env.py b/libraries/cloudharness-common/cloudharness/utils/env.py
index f2849d09..cfa089f0 100644
--- a/libraries/cloudharness-common/cloudharness/utils/env.py
+++ b/libraries/cloudharness-common/cloudharness/utils/env.py
@@ -4,14 +4,16 @@
from .. import log
+from .config import CloudharnessConfig as conf
+
TEST = 'TEST'
PROD = 'PROD'
VARIABLE_IMAGE_REGISTRY = 'CH_IMAGE_REGISTRY'
SUFFIX_TAG = 'IMAGE_TAG'
-SUFFIX_PORT = 'PORT'
-SUFFIX_NAME = 'NAME'
+SUFFIX_PORT = 'SERVICE_PORT'
+SUFFIX_NAME = 'SERVICE_HOST'
DEFAULT_IMAGE_REGISTRY = ''
@@ -19,13 +21,22 @@
def set_default_environment():
- with open(HERE + '/resources/values.yaml') as f:
- values = yaml.safe_load(f)
- os.environ.update({v['name']: str(v['value']) for v in values['env'] if v['name'] not in os.environ})
+ values = conf.get_configuration()
+ if values:
+ os.environ.update({v['name']: str(v["value"]) for v in values['env'] if v['name'] not in os.environ})
set_default_environment()
+def get_namespace():
+ try:
+ namespace=conf.get_configuration()['namespace']
+ except:
+ namespace=''
+ return namespace
+
+namespace=get_namespace()
+
class VariableNotFound(Exception):
def __init__(self, variable_name):
self.variable_name = variable_name
@@ -48,7 +59,7 @@ def get_image_full_tag(image_repository_name):
tagged = f"{image_repository_name}:{get_image_tag(image_repository_name)}"
registry = get_image_registry()
if registry:
- return registry + '/' + tagged
+ return registry.strip('/') + '/' + tagged
return tagged
@@ -79,21 +90,22 @@ def name_to_variable(application_name):
# CloudHarness Events
def get_cloudharness_events_client_id():
- return get_variable('CH_KEYCLOAK_WEBCLIENT_ID')
+ accounts_app = conf.get_application_by_filter(name='accounts')[0]
+ return accounts_app.webclient.id
def get_cloudharness_events_service():
- return get_service_cluster_address('CH_KAFKA')
+ return get_service_cluster_address('BOOTSTRAP')
def get_service_cluster_address(cloudharness_app_name):
if use_public_services():
return get_service_public_address(cloudharness_app_name)
- return cluster_service_address(get_sub_variable(cloudharness_app_name, SUFFIX_NAME)) + ':' + get_sub_variable(cloudharness_app_name, SUFFIX_PORT)
+ return get_sub_variable(cloudharness_app_name, SUFFIX_NAME) + ':' + get_sub_variable(cloudharness_app_name, SUFFIX_PORT)
def cluster_service_address(service_name):
- return + f'{service_name}.{namespace}.svc.cluster.local'
+ return f'{service_name}.{namespace}.svc.cluster.local'
def use_public_services():
@@ -107,7 +119,7 @@ def get_sub_variable(*vars):
def get_service_public_address(app_name):
- return ".".join([get_sub_variable(app_name, 'SUBDOMAIN'), get_public_domain()])
+ return ".".join([get_sub_variable('CH', app_name.upper(), 'SUBDOMAIN'), get_public_domain()])
def get_public_domain():
@@ -115,16 +127,29 @@ def get_public_domain():
def get_cloudharness_workflows_service_url():
- return get_service_public_address('CH_WORKFLOWS')
+ return get_service_public_address('WORKFLOWS')
+
+def get_cloudharness_sentry_service_url():
+ return 'https://' + get_service_public_address('sentry')
+
+def get_sentry_service_cluster_address():
+ sentry_app = conf.get_application_by_filter(name='sentry')[0]
+ return f'http://{sentry_app.name}:{sentry_app.port}'
+
+def get_cloudharness_common_service_url():
+ return 'https://' + get_service_public_address('common')
+def get_common_service_cluster_address():
+ common_app = conf.get_application_by_filter(name='common')[0]
+ return f'http://{common_app.name}:{common_app.port}'
def get_auth_service_cluster_address():
- return get_service_cluster_address('CH_KEYCLOAK')
+ return get_service_cluster_address('ACCOUNTS')
def get_auth_service_url():
- return get_service_public_address('CH_KEYCLOAK')
+ return get_service_public_address('ACCOUNTS')
def get_auth_realm():
- return get_variable('CH_KEYCLOAK_REALM')
+ return get_variable('CH_ACCOUNTS_REALM')
diff --git a/libraries/cloudharness-common/cloudharness/utils/resources/values.yaml b/libraries/cloudharness-common/cloudharness/utils/resources/values.yaml
deleted file mode 100644
index 2bcab846..00000000
--- a/libraries/cloudharness-common/cloudharness/utils/resources/values.yaml
+++ /dev/null
@@ -1,245 +0,0 @@
-apps:
- argo:
- enabled: true
- name: argo-ui-gk
- port: 80
- subdomain: argo
- docs:
- enabled: true
- harvest: false
- image:
- name: cloudharness-docs
- tag: 1
- name: cloudharness-docs
- port: 8080
- subdomain: docs
- events:
- enabled: true
- name: kafka-manager-gk
- port: 80
- subdomain: events
- kafka:
- name: bootstrap
- port: 9092
- keycloak:
- admin:
- pass: metacell
- role: administrator
- user: admin
- client:
- id: rest-client
- secret: 5678eb6e-9e2c-4ee5-bd54-34e7411339e8
- db:
- image:
- name: postgres
- tag: 10.4
- initialdb: auth_db
- name: keycloak-postgress
- pass: password
- user: user
- enabled: true
- harvest: true
- image:
- name: keycloak
- tag: 1
- name: keycloak
- port: 8080
- realm: cloudharness
- subdomain: accounts
- webclient:
- id: web-client
- secret: 452952ae-922c-4766-b912-7b106271e34b
- keycloak-gatekeeper:
- enabled: true
- image:
- name: keycloak-gatekeeper
- tag: 1
- name: keycloak-gatekeeper
- test:
- enabled: true
- harvest: true
- image:
- name: samples
- tag: 1
- name: samples
- port: 8080
- subdomain: test
- workflows:
- enabled: true
- harvest: false
- image:
- name: workflows
- tag: 1
- name: workflows
- port: 8080
- subdomain: workflows
-domain: cloudharness.metacell.us
-env:
- - name: CH_VERSION
- value: 0.0.1
- - name: CH_CHART_VERSION
- value: 0.0.1
- - name: CH_ELASTICSEARCH_ENABLED
- value: true
- - name: CH_ELASTICSEARCH_NAME
- value: elasticsearch
- - name: CH_ELASTICSEARCH_IMAGE_NAME
- value: docker.elastic.co/elasticsearch/elasticsearch
- - name: CH_ELASTICSEARCH_IMAGE_TAG
- value: 7.2.0
- - name: CH_ELASTICSEARCH_PORT
- value: 9200
- - name: CH_ELASTICSEARCH_NODEPORT
- value: 9300
- - name: CH_ELASTICSEARCH_STORAGE
- value: latest0Gi
- - name: CH_KIBANA_ENABLED
- value: true
- - name: CH_KIBANA_SECUREME
- value: true
- - name: CH_KIBANA_NAME
- value: el-kibana
- - name: CH_KIBANA_IMAGE_NAME
- value: docker.elastic.co/kibana/kibana
- - name: CH_KIBANA_IMAGE_TAG
- value: 7.2.0
- - name: CH_KIBANA_PORT
- value: 5601
- - name: CH_KIBANA_SUBDOMAIN
- value: monitoring
- - name: CH_KIBANA_GATEKEEPER_IMAGE
- value: keycloak-gatekeeper
- - name: CH_KIBANA_GATEKEEPER_TAG
- value: latest
- - name: CH_EVENTS_ENABLED
- value: true
- - name: CH_EVENTS_NAME
- value: kafka-manager-gk
- - name: CH_EVENTS_SUBDOMAIN
- value: events
- - name: CH_EVENTS_PORT
- value: 80
- - name: CH_KAFKA_NAME
- value: bootstrap
- - name: CH_KAFKA_PORT
- value: 9092
- - name: CH_ARGO_ENABLED
- value: true
- - name: CH_ARGO_NAME
- value: argo-ui-gk
- - name: CH_ARGO_SUBDOMAIN
- value: argo
- - name: CH_ARGO_PORT
- value: 80
- - name: CH_KEYCLOAK_GATEKEEPER_ENABLED
- value: true
- - name: CH_KEYCLOAK_GATEKEEPER_NAME
- value: keycloak-gatekeeper
- - name: CH_KEYCLOAK_GATEKEEPER_IMAGE_NAME
- value: keycloak-gatekeeper
- - name: CH_KEYCLOAK_GATEKEEPER_IMAGE_TAG
- value: latest
- - name: CH_WORKFLOWS_ENABLED
- value: true
- - name: CH_WORKFLOWS_NAME
- value: workflows
- - name: CH_WORKFLOWS_IMAGE_NAME
- value: workflows
- - name: CH_WORKFLOWS_IMAGE_TAG
- value: latest
- - name: CH_WORKFLOWS_HARVEST
- value: false
- - name: CH_WORKFLOWS_PORT
- value: 8080
- - name: CH_WORKFLOWS_SUBDOMAIN
- value: workflows
- - name: CH_KEYCLOAK_ENABLED
- value: true
- - name: CH_KEYCLOAK_NAME
- value: keycloak
- - name: CH_KEYCLOAK_IMAGE_NAME
- value: keycloak
- - name: CH_KEYCLOAK_IMAGE_TAG
- value: latest
- - name: CH_KEYCLOAK_ADMIN_PASS
- value: metacell
- - name: CH_KEYCLOAK_ADMIN_USER
- value: admin
- - name: CH_KEYCLOAK_ADMIN_ROLE
- value: administrator
- - name: CH_KEYCLOAK_CLIENT_ID
- value: rest-client
- - name: CH_KEYCLOAK_CLIENT_SECRET
- value: 5678eb6e-9e2c-4ee5-bd54-34e7411339e8
- - name: CH_KEYCLOAK_DB_IMAGE_NAME
- value: postgres
- - name: CH_KEYCLOAK_DB_IMAGE_TAG
- value: latest0.4
- - name: CH_KEYCLOAK_DB_INITIALDB
- value: auth_db
- - name: CH_KEYCLOAK_DB_NAME
- value: keycloak-postgress
- - name: CH_KEYCLOAK_DB_PASS
- value: password
- - name: CH_KEYCLOAK_DB_USER
- value: user
- - name: CH_KEYCLOAK_HARVEST
- value: true
- - name: CH_KEYCLOAK_WEBCLIENT_ID
- value: web-client
- - name: CH_KEYCLOAK_WEBCLIENT_SECRET
- value: 452952ae-922c-4766-b912-7b106271e34b
- - name: CH_KEYCLOAK_PORT
- value: 8080
- - name: CH_KEYCLOAK_REALM
- value: cloudharness
- - name: CH_KEYCLOAK_SUBDOMAIN
- value: accounts
- - name: CH_TEST_ENABLED
- value: true
- - name: CH_TEST_NAME
- value: test
- - name: CH_TEST_IMAGE_NAME
- value: test
- - name: CH_TEST_IMAGE_TAG
- value: latest
- - name: CH_TEST_HARVEST
- value: true
- - name: CH_TEST_PORT
- value: 8080
- - name: CH_TEST_SUBDOMAIN
- value: test
- - name: CH_DOCS_ENABLED
- value: true
- - name: CH_DOCS_NAME
- value: docs
- - name: CH_DOCS_IMAGE_NAME
- value: docs
- - name: CH_DOCS_IMAGE_TAG
- value: latest
- - name: CH_DOCS_HARVEST
- value: false
- - name: CH_DOCS_PORT
- value: 8080
- - name: CH_DOCS_SUBDOMAIN
- value: docs
- - name: CH_DOMAIN
- value: cloudharness.metacell.us
- - name: CH_IMAGE_REGISTRY
- value: localhost:5000
- - name: CH_IMAGE_TAG
- value: latest
-fullnameOverride: ""
-ingress:
- enabled: true
- letsencrypt:
- email: facundo@metacell.us
- name: cloudharness-ingress
-minikube: true
-nameOverride: ""
-privenv:
- - name: CH_SECRET
- value: 'In God we trust; all others must bring data. ― W. Edwards Deming'
-registry: localhost:5000
-serviceaccount: argo-workflows
-tag: 1
diff --git a/libraries/cloudharness-common/cloudharness/utils/secrets.py b/libraries/cloudharness-common/cloudharness/utils/secrets.py
new file mode 100644
index 00000000..3e2c85ce
--- /dev/null
+++ b/libraries/cloudharness-common/cloudharness/utils/secrets.py
@@ -0,0 +1,13 @@
+import os
+
+def get_secret(name: str, key: str):
+ """
+ Helper function for reading CloudHarness application secrets
+
+ The secret value is read from the environment variable named '<name>-<key>'
+
+ Args:
+ name (str): name of the secret
+ key (str): name of the data key in the secret
+ """
+ return os.environ.get(f'{name}-{key}', None)
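Sketch of the secrets helper; the secret and key names are illustrative and must match the environment variables injected into the pod.

```python
from cloudharness.utils.secrets import get_secret

# Returns None when the '<name>-<key>' environment variable is not set.
db_password = get_secret('keycloak-postgress', 'password')
```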
diff --git a/libraries/cloudharness-common/cloudharness/workflows/argo.py b/libraries/cloudharness-common/cloudharness/workflows/argo.py
index 3103c62d..3550c8b7 100644
--- a/libraries/cloudharness-common/cloudharness/workflows/argo.py
+++ b/libraries/cloudharness-common/cloudharness/workflows/argo.py
@@ -15,7 +15,11 @@
version = 'v1alpha1'
plural = 'workflows'
-namespace = 'argo-workflows'
+
+# determine the namespace of the current app and run the workflow in that namespace
+from cloudharness.utils.config import CloudharnessConfig as conf
+ch_conf = conf.get_configuration()
+namespace = ch_conf.get('namespace', 'argo-workflows') if ch_conf else 'argo-workflows'
CUSTOM_OBJECT_URL = f"/apis/{group}/{version}/{plural}"
@@ -148,7 +152,7 @@ def create_namespace():
check_namespace()
except Exception as e:
log.error('Namespace for argo workflows not found', exc_info=e)
- log.info("Creating namespace " + namespace)
+ log.info("Creating namespace %s", namespace)
try:
create_namespace()
except Exception as e:
diff --git a/libraries/cloudharness-common/cloudharness/workflows/operations.py b/libraries/cloudharness-common/cloudharness/workflows/operations.py
index 56516536..79760082 100644
--- a/libraries/cloudharness-common/cloudharness/workflows/operations.py
+++ b/libraries/cloudharness-common/cloudharness/workflows/operations.py
@@ -11,7 +11,7 @@
from . import argo
-from .tasks import Task, SendResultTask
+from .tasks import Task, SendResultTask, CustomTask
from cloudharness import log
@@ -27,8 +27,9 @@ class ManagedOperation:
based on a collection of tasks that run according to the operation type and configuration.
"""
- def __init__(self, name):
+ def __init__(self, name, *args, **kwargs):
self.name = name
+ self.on_exit_notify = kwargs.get('on_exit_notify', None)
def execute(self, **parameters):
raise NotImplementedError(f"{self.__class__.__name__} is abstract")
@@ -39,12 +40,12 @@ class ContainerizedOperation(ManagedOperation):
Abstract Containarized operation based on an argo workflow
"""
- def __init__(self, basename):
+ def __init__(self, basename, *args, **kwargs):
"""
:param status:
:param parameters:
"""
- super(ContainerizedOperation, self).__init__(basename)
+ super(ContainerizedOperation, self).__init__(basename, *args, **kwargs)
self.persisted = None
@@ -67,12 +68,38 @@ def to_workflow(self, **arguments):
return workflow
def spec(self):
- return {
+ spec = {
'entrypoint': self.entrypoint,
- 'templates': tuple(self.modify_template(template) for template in self.templates),
+ 'TTLSecondsAfterFinished': 24*60*60, # remove the workflow & pod after 1 day
+ 'templates': [self.modify_template(template) for template in self.templates],
'serviceAccountName': SERVICE_ACCOUNT,
- 'imagePullSecrets': [{'name': CODEFRESH_PULL_SECRET}]
+ 'imagePullSecrets': [{'name': CODEFRESH_PULL_SECRET}],
+ 'volumes': [{
+ 'name': 'cloudharness-allvalues',
+ 'configMap': {
+ 'name': 'cloudharness-allvalues'
+ }
+ }] # mount allvalues so we can use the cloudharness Python library
}
+ if self.on_exit_notify:
+ spec = self.add_on_exit_notify_handler(spec)
+ return spec
+
+ def add_on_exit_notify_handler(self, spec):
+ queue = self.on_exit_notify['queue']
+ payload = self.on_exit_notify['payload']
+ exit_task = CustomTask(
+ name="exit-handler",
+ image_name='workflows-notify-queue',
+ workflow_result='{{workflow.status}}',
+ queue_name=queue,
+ payload=payload
+ )
+ spec['onExit'] = 'exit-handler'
+ spec['templates'].append(
+ self.modify_template(exit_task.spec())
+ )
+ return spec
def modify_template(self, template):
"""Hook to modify templates (e.g. add volumes)"""
@@ -83,6 +110,8 @@ def submit(self):
op = self.to_workflow()
log.debug("Submitting workflow\n" + pyaml.dump(op))
+ log.error(pyaml.dump(op))
+ print(pyaml.dump(op))
self.persisted = argo.submit_workflow(op) # TODO use rest api for that? Include this into cloudharness.workflows?
@@ -104,7 +133,7 @@ def is_error(self):
return False
def name_from_path(self, path):
- return path.replace('/', '').lower()
+ return path.replace('/', '').replace('_', '').lower()
class SyncOperation(ManagedOperation):
@@ -192,7 +221,7 @@ def get_operation_update_url(self):
class CompositeOperation(AsyncOperation):
"""Operation with multiple tasks"""
- def __init__(self, basename, tasks, shared_directory="", shared_volume_size=10):
+ def __init__(self, basename, tasks, *args, shared_directory="", shared_volume_size=10, **kwargs):
"""
:param basename:
@@ -201,7 +230,7 @@ def __init__(self, basename, tasks, shared_directory="", shared_volume_size=10):
will also be available from the container as environment variable `shared_directory`
:param shared_volume_size: size of the shared volume in MB (is shared_directory is not set, it is ignored)
"""
- AsyncOperation.__init__(self, basename)
+ AsyncOperation.__init__(self, basename, *args, **kwargs)
self.tasks = tasks
if shared_directory:
@@ -229,34 +258,52 @@ def templates(self):
def spec(self):
spec = super().spec()
if self.volumes:
- spec['volumeClaimTemplates'] = [self.spec_volume(volume) for volume in self.volumes]
+ spec['volumeClaimTemplates'] = [self.spec_volumeclaim(volume) for volume in self.volumes if ':' not in volume] # without PVC prefix (e.g. /location)
+ spec['volumes'] += [self.spec_volume(volume) for volume in self.volumes if ':' in volume] # with PVC prefix (e.g. pvc-001:/location)
return spec
def modify_template(self, template):
# TODO verify the following condition. Can we mount volumes also with source based templates
if self.volumes and 'container' in template:
- template['container']['volumeMounts'] = \
- [{'name': self.name_from_path(volume), 'mountPath': volume} for volume in self.volumes]
+ template['container']['volumeMounts'] += [self.volume_template(volume) for volume in self.volumes]
return template
- def spec_volume(self, volume):
- return {
- 'metadata': {
- 'name': self.name_from_path(volume),
- },
- 'spec': {
- 'accessModes': ["ReadWriteOnce"],
- 'resources': {
- 'requests':
- {
- 'storage': f'{self.shared_volume_size}Mi'
- }
-
+ def volume_template(self, volume):
+ path = volume
+ if ":" in path:
+ path = volume.split(':')[-1]
+ return dict({'name': self.name_from_path(path), 'mountPath': path })
+
+ def spec_volumeclaim(self, volume):
+ # when the volume is NOT prefixed by a PVC (e.g. /location) then create a temporary PVC for the workflow
+ if ':' not in volume:
+ return {
+ 'metadata': {
+ 'name': self.name_from_path(volume.split(':')[0]),
+ },
+ 'spec': {
+ 'accessModes': ["ReadWriteOnce"],
+ 'resources': {
+ 'requests':
+ {
+ 'storage': f'{self.shared_volume_size}Mi'
+ }
+ }
}
-
}
- }
+ return {}
+ def spec_volume(self, volume):
+ # when the volume is prefixed by a PVC (e.g. pvc-001:/location) then add the PVC to the volumes of the workflow
+ if ':' in volume:
+ pvc, path = volume.split(':')
+ return {
+ 'name': self.name_from_path(path),
+ 'persistentVolumeClaim': {
+ 'claimName': pvc
+ }
+ }
+ return {}
class PipelineOperation(CompositeOperation):
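Sketch of how the new `on_exit_notify` option and the PVC-prefixed volume convention fit together; the task image, queue name and PVC name are illustrative and the exact task signatures are assumed from the calls shown in this hunk.

```python
# Sketch only: task image, queue name and PVC name are illustrative.
from cloudharness.workflows import operations, tasks

op = operations.PipelineOperation(
    'my-pipeline',
    (tasks.CustomTask(name='step1', image_name='my-step-image'),),
    shared_directory='pvc-001:/mnt/shared',          # existing PVC 'pvc-001' mounted at /mnt/shared
    on_exit_notify={'queue': 'operation-results',    # handled by the workflows-notify-queue exit task
                    'payload': 'done'})
op.submit()
```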
diff --git a/libraries/cloudharness-common/cloudharness/workflows/tasks.py b/libraries/cloudharness-common/cloudharness/workflows/tasks.py
index ebacf445..6c11a220 100644
--- a/libraries/cloudharness-common/cloudharness/workflows/tasks.py
+++ b/libraries/cloudharness-common/cloudharness/workflows/tasks.py
@@ -62,7 +62,12 @@ def spec(self):
'image': self.image_name,
'env': self.envs,
'resources': self.resources,
- 'imagePullPolicy': self.image_pull_policy
+ 'imagePullPolicy': self.image_pull_policy,
+ 'volumeMounts': [{
+ 'name': 'cloudharness-allvalues',
+ 'mountPath': '/opt/cloudharness/resources/allvalues.yaml',
+ 'subPath': 'allvalues.yaml'
+ }],
},
'inputs': {},
'metadata': {},
diff --git a/libraries/cloudharness-common/cloudharness/workflows/utils.py b/libraries/cloudharness-common/cloudharness/workflows/utils.py
index 6410a946..fcea0ba8 100644
--- a/libraries/cloudharness-common/cloudharness/workflows/utils.py
+++ b/libraries/cloudharness-common/cloudharness/workflows/utils.py
@@ -1,5 +1,6 @@
import os
+from cloudharness.events.client import EventClient
from cloudharness.utils.env import get_variable
WORKFLOW_NAME_VARIABLE_NAME = "CH_WORKFLOW_NAME"
@@ -15,3 +16,7 @@ def get_workflow_name():
def get_shared_directory():
return os.getenv(SHARED_DIRECTORY_VARIABLE_NAME)
+
+def notify_queue(queue, message):
+ client = EventClient(queue)
+ client.produce(message)
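And the matching helper for producing a message on a queue from inside a workflow step; the queue name and payload are illustrative.

```python
from cloudharness.workflows.utils import notify_queue

# Publishes a JSON message on the given topic via the CloudHarness events service.
notify_queue('operation-results', {'status': 'Succeeded', 'workflow': 'my-pipeline'})
```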
diff --git a/libraries/cloudharness-common/requirements.txt b/libraries/cloudharness-common/requirements.txt
index 9119e5dc..97779c99 100644
--- a/libraries/cloudharness-common/requirements.txt
+++ b/libraries/cloudharness-common/requirements.txt
@@ -4,6 +4,7 @@ cffi==1.12.2
chardet==3.0.4
cryptography==2.6.1
idna==2.8
+jwt==1.0.0
pycosat==0.6.3
pycparser==2.19
pyOpenSSL==19.0.0
@@ -15,4 +16,6 @@ urllib3==1.24.1
pykafka==2.8.0
pyaml
kafka-python
-kubernetes
\ No newline at end of file
+kubernetes
+sentry-sdk[flask]==0.14.4
+python-keycloak==0.23.0
diff --git a/libraries/cloudharness-common/setup.py b/libraries/cloudharness-common/setup.py
index 284c0aaa..931fcde9 100644
--- a/libraries/cloudharness-common/setup.py
+++ b/libraries/cloudharness-common/setup.py
@@ -3,7 +3,7 @@
NAME = "cloudharness"
-VERSION = "0.1.0"
+VERSION = "0.2.0"
# To install the library, run the following
#
# python setup.py install
@@ -14,7 +14,11 @@
REQUIREMENTS = [
'kubernetes',
'kafka-python',
- 'pyaml'
+ 'pyaml',
+ 'jwt',
+ 'requests>=2.21.0',
+ 'sentry-sdk[flask]>=0.14.4',
+ 'python-keycloak==0.23.0'
]
diff --git a/libraries/cloudharness-common/tests/test_applications.py b/libraries/cloudharness-common/tests/test_applications.py
new file mode 100644
index 00000000..01f0b32b
--- /dev/null
+++ b/libraries/cloudharness-common/tests/test_applications.py
@@ -0,0 +1,80 @@
+from cloudharness.applications import ApplicationConfiguration, get_configuration
+
+conf_1 = {
+ 'name': 'app1',
+ 'harness': {
+ 'service': {
+ 'auto': False
+ },
+ 'deployment': {
+ 'auto': True
+ },
+ 'sentry': True
+ }
+}
+
+conf_2 = {
+ 'name': 'app2',
+ 'harness': {
+ 'service': {
+ 'auto': False
+ },
+ 'deployment': {
+ 'auto': False
+ },
+ 'sentry': True
+ }
+}
+
+conf_2sub = {
+ 'name': 'app2sub',
+ 'harness': {
+ 'service': {
+ 'auto': True
+ },
+ 'deployment': {
+ 'auto': False
+ },
+ 'sentry': False
+ }
+}
+
+conf_2['subapp'] = conf_2sub
+
+
+def test_application_conf():
+ uut = ApplicationConfiguration(conf_1)
+ assert not uut.is_auto_service()
+ assert uut.is_auto_deployment()
+ assert uut.is_sentry_enabled()
+
+
+def test_get_configuration():
+ from cloudharness.utils.config import CloudharnessConfig
+ CloudharnessConfig.allvalues = {
+ 'apps': {
+ 'a': conf_1,
+ 'b': conf_2
+ }
+
+ }
+ uut = get_configuration('app1')
+ assert uut.name == 'app1'
+ assert not uut.is_auto_service()
+ assert uut.is_auto_deployment()
+ assert uut.is_sentry_enabled()
+
+ uut = get_configuration('app2')
+ assert uut.name == 'app2'
+ assert not uut.is_auto_service()
+ assert not uut.is_auto_deployment()
+ assert uut.is_sentry_enabled()
+
+
+ # uut = get_configuration('app2sub') # FIXME this should work
+ uut = uut.subapp
+
+ assert uut.name == 'app2sub'
+ assert uut.is_auto_service()
+ assert not uut.is_auto_deployment()
+ assert not uut.is_sentry_enabled()
diff --git a/utilities/README.md b/utilities/README.md
index 218e9b7a..a1bbc44f 100644
--- a/utilities/README.md
+++ b/utilities/README.md
@@ -1,3 +1,52 @@
#CloudHarness Deploy
-CloudHarness Deploy is a collection of Python utilities to create CloudHarness deployments.
\ No newline at end of file
+CloudHarness Deploy is a collection of Python utilities to create CloudHarness deployments.
+
+## harness-deployment
+
+Generates the helm chart to deploy on Kubernetes.
+
+Usage:
+
+```bash
+harness-deployment .
+```
+
+For more info, `harness-deployment --help`
+
+
+## harness-application
+
+Create a new REST application.
+
+Usage:
+
+```bash
+harness-application myapp
+```
+
+For more info, `harness-application --help`
+
+## harness-codefresh
+
+Generates the Codefresh continuous deployment specification.
+
+Usage:
+
+```bash
+harness-codefresh .
+```
+
+For more info, `harness-codefresh --help`
+
+## harness-generate
+
+Generates server and client code for all standard harness REST applications.
+
+Usage:
+
+```bash
+harness-generate .
+```
+
+For more info, `harness-generate --help`
\ No newline at end of file
diff --git a/utilities/cloudharness_utilities/build.py b/utilities/cloudharness_utilities/build.py
index 6968fa71..da1fdde9 100644
--- a/utilities/cloudharness_utilities/build.py
+++ b/utilities/cloudharness_utilities/build.py
@@ -1,22 +1,29 @@
import os
import logging
+import tempfile
from docker import from_env as DockerClient
+from .utils import find_dockerfiles_paths, app_name_from_path, merge_configuration_directories
from .constants import NODE_BUILD_IMAGE, APPS_PATH, STATIC_IMAGES_PATH, BASE_IMAGES_PATH
-from .utils import find_dockerfiles_paths, image_name_from_docker_path
-
class Builder:
- def __init__(self, root_paths, images, tag, registry='', interactive=False, exclude=tuple()):
- self.images = images
+ def __init__(self, root_paths, include, tag, namespace, domain, registry='', interactive=False,
+ exclude=tuple()):
+ self.included = include or []
self.tag = tag
self.root_paths = root_paths
self.registry = registry
self.interactive = interactive
self.exclude = exclude
+ self.namespace = namespace
+ self.domain = domain
+
+ if include:
+ logging.info('Building the following subpaths: %s.', ', '.join(include))
+ def set_docker_client(self):
# connect to docker
try:
self.client = DockerClient()
@@ -36,28 +43,28 @@ def push(self, image_repository):
# filter the images to build
- def should_build_image(self, image_name) -> bool:
- if image_name in self.exclude:
+ def should_build_image(self, image_path) -> bool:
+ if image_path in self.exclude:
return False
- if len(self.images) == 0:
+ if not self.included:
if self.interactive:
- answer = input("Do you want to build " + image_name + "? [Y/n]")
+ answer = input("Do you want to build " + image_path + "? [Y/n]")
return answer.upper() != 'N'
return True
- if image_name in self.images:
+ if any(inc in image_path for inc in self.included):
return True
- logging.info("Skipping build for image", image_name)
+ logging.info("Skipping build for image %s", image_path)
return False
def run(self):
- for root_path in self.root_paths:
- self.find_and_build_under_path(BASE_IMAGES_PATH, context_path=root_path, root_path=root_path)
- # Build static images that will be use as base for other images
- self.find_and_build_under_path(STATIC_IMAGES_PATH, root_path=root_path)
-
- self.find_and_build_under_path(APPS_PATH, root_path=root_path)
-
+ self.set_docker_client()
+ logging.info('Start building docker images')
+ for rpath in self.root_paths:
+ logging.info('Building from root directory %s', rpath)
+ self.find_and_build_under_path(BASE_IMAGES_PATH, rpath, rpath)
+ self.find_and_build_under_path(STATIC_IMAGES_PATH, None, rpath)
+ self.find_and_build_under_path(APPS_PATH, None, rpath)
def find_and_build_under_path(self, base_path, context_path=None, root_path=None):
abs_base_path = os.path.join(root_path, base_path)
@@ -65,19 +72,32 @@ def find_and_build_under_path(self, base_path, context_path=None, root_path=None
self.should_build_image(path))
for dockerfile_path in docker_files:
-
dockerfile_rel_path = "" if not context_path else os.path.relpath(dockerfile_path, start=context_path)
# extract image name
- image_name = image_name_from_docker_path(os.path.relpath(dockerfile_path, start=abs_base_path))
- self.build_image(image_name, dockerfile_rel_path, context_path=context_path if context_path else dockerfile_path)
+ image_name = app_name_from_path(os.path.relpath(dockerfile_path, start=abs_base_path))
+ if self.should_build_image(os.path.relpath(dockerfile_path, start=abs_base_path)):
+ self.build_image(image_name, dockerfile_rel_path,
+ context_path=context_path if context_path else dockerfile_path)
+
+
+ def build_under_path(self, dpath):
+ """ Uses docker sdk to build a docker images from path information """
+ image_name = dpath['name']
+ dockerfile_rel_path = dpath['rel_path']
+ context_path = dpath['context_path']
+ dockerfile_path = dpath['abs_path']
+
+ self.build_image(image_name, dockerfile_rel_path,
+ context_path=context_path if context_path else dockerfile_path)
+
def build_image(self, image_name, dockerfile_rel_path, context_path=None):
- registry = "" if not self.registry else self.registry + '/'
+ registry = "" if not self.registry else self.registry.strip('/') + '/' # make sure the registry ends with only one single /
# build image
image_tag = f'{registry}{image_name}:{self.tag}' if self.tag else image_name
- buildargs = dict(TAG=self.tag, REGISTRY=registry)
+ buildargs = dict(TAG=self.tag, REGISTRY=registry, NAMESPACE=self.namespace, DOMAIN=self.domain)
# print header
logging.info(f'\n{80 * "#"}\nBuilding {image_tag} \n{80 * "#"}\n')
@@ -86,7 +106,8 @@ def build_image(self, image_name, dockerfile_rel_path, context_path=None):
image, response = self.client.images.build(path=context_path,
tag=image_tag,
buildargs=buildargs,
- dockerfile=os.path.join(dockerfile_rel_path, "Dockerfile") if dockerfile_rel_path else None
+ dockerfile=os.path.join(dockerfile_rel_path,
+ "Dockerfile") if dockerfile_rel_path else None
)
# log stream
@@ -95,3 +116,11 @@ def build_image(self, image_name, dockerfile_rel_path, context_path=None):
logging.info(line['stream'].replace('\n', ' ').replace('\r', ''))
if self.registry:
self.push(image_tag)
+
+
+ def log_merging_operation(self, dpaths: list) -> None:
+ logging_message = f"\n\nFound multiple dockerfiles for the next image ({dpaths[0]['name']}):\n\n"
+ for dpath in dpaths:
+ logging_message += f"{dpath['abs_path']}\n"
+ logging_message += "\nWill proceed to merge the two folder and build from the result\n\n"
+ logging.info(logging_message)
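Sketch of the reworked `Builder` entry point with the new `include`, `namespace` and `domain` parameters; paths, tag and registry values are illustrative.

```python
# Sketch only: root paths, include filter, tag, namespace, domain and registry are illustrative.
from cloudharness_utilities.build import Builder

builder = Builder(
    root_paths=['.', 'cloud-harness'],
    include=['samples'],               # only build images whose path contains 'samples'
    tag='0.0.1',
    namespace='ch',
    domain='cloudharness.local',
    registry='localhost:5000')
builder.run()
```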
diff --git a/utilities/cloudharness_utilities/codefresh.py b/utilities/cloudharness_utilities/codefresh.py
index 5fa7769d..5d0b29eb 100644
--- a/utilities/cloudharness_utilities/codefresh.py
+++ b/utilities/cloudharness_utilities/codefresh.py
@@ -3,58 +3,65 @@
import logging
from .constants import HERE, BUILD_STEP_BASE, BUILD_STEP_STATIC, BUILD_STEP_PARALLEL, BUILD_STEP_INSTALL, \
- CODEFRESH_REGISTRY, K8S_IMAGE_EXCLUDE, CODEFRESH_PATH, CODEFRESH_BUILD_PATH, \
+ CODEFRESH_PATH, CODEFRESH_BUILD_PATH, \
CODEFRESH_TEMPLATE_PATH, APPS_PATH, STATIC_IMAGES_PATH, BASE_IMAGES_PATH, DEPLOYMENT_PATH
from .helm import collect_helm_values
-from .utils import find_dockerfiles_paths, image_name_from_docker_path, \
- get_image_name, get_template, merge_to_yaml_file
+from .utils import find_dockerfiles_paths, app_name_from_path, \
+ get_image_name, get_template, merge_to_yaml_file, dict_merge
logging.getLogger().setLevel(logging.INFO)
-CLOUD_HARNESS_PATH = 'cloud-harness'
+CLOUD_HARNESS_PATH = "cloud-harness"
-def create_codefresh_deployment_scripts(deployment_root_path, tag="${{CF_REVISION}}", codefresh_path=CODEFRESH_PATH):
+def create_codefresh_deployment_scripts(root_paths, codefresh_path=CODEFRESH_PATH, include=()):
"""
Entry point to create deployment scripts for codefresh: codefresh.yaml and helm chart
"""
- codefresh = get_template(os.path.join(deployment_root_path, CODEFRESH_TEMPLATE_PATH))
+ if include:
+ logging.info('Including the following subpaths to the build: %s.', ', '.join(include))
+
+ codefresh = get_template(os.path.join(HERE, CODEFRESH_TEMPLATE_PATH))
codefresh['steps'][BUILD_STEP_BASE]['steps'] = {}
codefresh['steps'][BUILD_STEP_STATIC]['steps'] = {}
codefresh['steps'][BUILD_STEP_PARALLEL]['steps'] = {}
- def codefresh_build_step_from_base_path(base_path, build_step, root_context=None):
- abs_base_path = os.path.join(deployment_root_path, base_path)
- for dockerfile_path in find_dockerfiles_paths(abs_base_path):
- app_relative_to_root = os.path.relpath(dockerfile_path, deployment_root_path)
- app_relative_to_base = os.path.relpath(dockerfile_path, abs_base_path)
- app_name = image_name_from_docker_path(app_relative_to_base)
- if app_name in K8S_IMAGE_EXCLUDE:
- continue
- build = codefresh_app_build_spec(app_name, os.path.relpath(root_context,
- deployment_root_path) if root_context else app_relative_to_root,
- dockerfile_path=os.path.join(
- os.path.relpath(dockerfile_path, root_context) if root_context else '',
+ for root_path in root_paths:
+ template_path = os.path.join(root_path, CODEFRESH_TEMPLATE_PATH)
+ if os.path.exists(template_path):
+ tpl = get_template(template_path)
+ del tpl['steps'][BUILD_STEP_BASE]
+ del tpl['steps'][BUILD_STEP_STATIC]
+ del tpl['steps'][BUILD_STEP_PARALLEL]
+ codefresh = dict_merge(codefresh, tpl)
+
+ def codefresh_build_step_from_base_path(base_path, build_step, fixed_context=None):
+ abs_base_path = os.path.join(os.getcwd(), base_path)
+ for dockerfile_path in find_dockerfiles_paths(abs_base_path):
+ app_relative_to_root = os.path.relpath(dockerfile_path, '.')
+ app_relative_to_base = os.path.relpath(dockerfile_path, abs_base_path)
+ app_name = app_name_from_path(app_relative_to_base)
+ if include and not any(inc in dockerfile_path for inc in include):
+ continue
+ build = codefresh_app_build_spec(
+ app_name=app_name,
+ app_context_path=os.path.relpath(fixed_context, '.') if fixed_context else app_relative_to_root,
+ dockerfile_path=os.path.join(os.path.relpath(dockerfile_path, fixed_context) if fixed_context else '',
"Dockerfile"))
- codefresh['steps'][build_step]['steps'][app_name] = build
-
- codefresh_build_step_from_base_path(BASE_IMAGES_PATH, BUILD_STEP_BASE, root_context=deployment_root_path)
- codefresh_build_step_from_base_path(STATIC_IMAGES_PATH, BUILD_STEP_STATIC)
- codefresh_build_step_from_base_path(APPS_PATH, BUILD_STEP_PARALLEL)
+ codefresh['steps'][build_step]['steps'][app_name] = build
- if os.path.exists(os.path.join(deployment_root_path, CLOUD_HARNESS_PATH)):
- logging.info('Create build steps for cloud-harness images')
- codefresh_build_step_from_base_path(os.path.join(CLOUD_HARNESS_PATH, BASE_IMAGES_PATH), BUILD_STEP_BASE,
- root_context=deployment_root_path)
- codefresh_build_step_from_base_path(os.path.join(CLOUD_HARNESS_PATH, STATIC_IMAGES_PATH), BUILD_STEP_STATIC)
- codefresh_build_step_from_base_path(os.path.join(CLOUD_HARNESS_PATH, APPS_PATH), BUILD_STEP_PARALLEL)
+ codefresh_build_step_from_base_path(os.path.join(root_path, BASE_IMAGES_PATH), BUILD_STEP_BASE,
+ fixed_context=root_path)
+ codefresh_build_step_from_base_path(os.path.join(root_path, STATIC_IMAGES_PATH), BUILD_STEP_STATIC)
+ codefresh_build_step_from_base_path(os.path.join(root_path, APPS_PATH), BUILD_STEP_PARALLEL)
codefresh['steps'] = {k: step for k, step in codefresh['steps'].items() if
- 'type' not in step or step['type'] != 'parallel' or (step['steps'] if 'steps' in step else [])}
+ 'type' not in step or step['type'] != 'parallel' or (
+ step['steps'] if 'steps' in step else [])}
- codefresh_abs_path = os.path.join(deployment_root_path, DEPLOYMENT_PATH, codefresh_path)
+ codefresh_abs_path = os.path.join(os.getcwd(), DEPLOYMENT_PATH, codefresh_path)
codefresh_dir = os.path.dirname(codefresh_abs_path)
if not os.path.exists(codefresh_dir):
os.makedirs(codefresh_dir)
@@ -74,16 +81,21 @@ def codefresh_build_spec(**kwargs):
return build
-def codefresh_app_build_spec(app_name, app_path, dockerfile_path="Dockerfile"):
+def codefresh_app_build_spec(app_name, app_context_path, dockerfile_path="Dockerfile"):
logging.info('Generating build script for ' + app_name)
title = app_name.capitalize().replace('-', ' ').replace('/', ' ').replace('.', ' ').strip()
- build = codefresh_build_spec(image_name=get_image_name(app_name), title=title, working_directory='./' + app_path,
+ build = codefresh_build_spec(image_name=get_image_name(app_name), title=title,
+ working_directory='./' + app_context_path,
dockerfile=dockerfile_path)
- specific_build_template_path = os.path.join(app_path, 'build.yaml')
+ specific_build_template_path = os.path.join(app_context_path, 'build.yaml')
if os.path.exists(specific_build_template_path):
- logging.info("Specific build template found:", specific_build_template_path)
+ logging.info("Specific build template found: %s" % (specific_build_template_path))
with open(specific_build_template_path) as f:
build_specific = yaml.safe_load(f)
+
+ build_args = build_specific.pop('build_arguments') if 'build_arguments' in build_specific else []
build.update(build_specific)
+ build.update({'build_arguments': build['build_arguments'] + build_args})
+
return build
diff --git a/utilities/cloudharness_utilities/constants.py b/utilities/cloudharness_utilities/constants.py
index d8f7482a..7b29ba8c 100644
--- a/utilities/cloudharness_utilities/constants.py
+++ b/utilities/cloudharness_utilities/constants.py
@@ -27,10 +27,11 @@
CH_BASE_IMAGES = {'cloudharness-base': 'python:3.7-alpine', 'cloudharness-base-debian': 'python:3'}
-K8S_IMAGE_EXCLUDE = ('accounts-keycloak-gatekeeper',)
BUILD_STEP_BASE = 'build_base_images'
BUILD_STEP_STATIC = 'build_static_images'
BUILD_STEP_PARALLEL = 'build_application_images'
BUILD_STEP_INSTALL = 'deployment'
+BUILD_FILENAMES = ('node_modules',)
+
diff --git a/utilities/cloudharness_utilities/deployment-configuration/codefresh-build-template.yaml b/utilities/cloudharness_utilities/deployment-configuration/codefresh-build-template.yaml
index dec571af..e0af336c 100644
--- a/utilities/cloudharness_utilities/deployment-configuration/codefresh-build-template.yaml
+++ b/utilities/cloudharness_utilities/deployment-configuration/codefresh-build-template.yaml
@@ -2,7 +2,8 @@ type: build
stage: build
tag: '${{CF_REVISION}}'
dockerfile: Dockerfile
-when:
- branch:
- only:
- - '${{CF_BRANCH}}'
\ No newline at end of file
+build_arguments:
+ - REGISTRY=${{REGISTRY}}
+ - TAG=${{CF_REVISION}}
+ - NAMESPACE=${{NAMESPACE}}
+ - DOMAIN=${{DOMAIN}}
diff --git a/utilities/cloudharness_utilities/deployment-configuration/codefresh-template.yaml b/utilities/cloudharness_utilities/deployment-configuration/codefresh-template.yaml
index c95419b5..e0b96c47 100644
--- a/utilities/cloudharness_utilities/deployment-configuration/codefresh-template.yaml
+++ b/utilities/cloudharness_utilities/deployment-configuration/codefresh-template.yaml
@@ -1,8 +1,8 @@
version: '1.0'
stages:
-- prepare
-- build
-- deploy
+ - prepare
+ - build
+ - deploy
steps:
main_clone:
title: Clone main repository
@@ -15,6 +15,23 @@ steps:
title: Post main clone
type: parallel
stage: prepare
+ prepare_deployment:
+ title: "Prepare helm chart"
+ image: python:3.7
+ stage: prepare
+ working_directory: .
+ commands:
+ - pip install cloud-harness/utilities
+ - harness-deployment . cloud-harness -m build -t ${{CF_REVISION}} -d ${{DOMAIN}} -r ${{REGISTRY}} -rs ${{REGISTRY_SECRET}}
+ prepare_deployment_view:
+ commands:
+ - 'helm template ./deployment/helm --debug -n ${{NAME}}'
+ environment:
+ - ACTION=auth
+ - KUBE_CONTEXT=${{NAME}}
+ image: codefresh/cfstep-helm:2.16.1
+ stage: prepare
+ title: 'View helm chart'
build_base_images:
title: Build base images
type: parallel
@@ -32,36 +49,15 @@ steps:
stage: build
steps:
REPLACE_ME
-
- prepare_deployment:
- title: "Prepare helm chart"
- image: python:3.7
- stage: deploy
- working_directory: .
- commands:
- - pip install -r cloud-harness/requirements.txt
- - harness-deployment . cloud-harness -t ${{CF_REVISION}} -d ${{DOMAIN}}
-
- prepare_deployment_view:
- commands:
- - 'helm template ./deployment/helm --debug -n ${{NAME}}'
- environment:
- - ACTION=auth
- - KUBE_CONTEXT=${{NAME}}
- image: codefresh/cfstep-helm:2.16.1
- stage: prepare
- title: 'View helm chart'
-
deployment:
stage: deploy
- image: 'codefresh/cfstep-helm:2.16.1'
+ image: codefresh/cfstep-helm:3.0.3
title: Installing chart
environment:
- CHART_REF=./deployment/helm
- - RELEASE_NAME=${{NAME}}
- - KUBE_CONTEXT=${{NAME}}
+ - RELEASE_NAME=${{NAMESPACE}}
+ - KUBE_CONTEXT=${{CLUSTER_NAME}}
- NAMESPACE=${{NAMESPACE}}
- - TILLER_NAMESPACE=kube-system
- CHART_VERSION=0.0.1
- HELM_REPO_USE_HTTP=false
- HELM_REPO_CONTEXT_PATH=
diff --git a/utilities/cloudharness_utilities/deployment-configuration/helm/Chart.yaml b/utilities/cloudharness_utilities/deployment-configuration/helm/Chart.yaml
index bb11a716..20d334a2 100644
--- a/utilities/cloudharness_utilities/deployment-configuration/helm/Chart.yaml
+++ b/utilities/cloudharness_utilities/deployment-configuration/helm/Chart.yaml
@@ -7,4 +7,6 @@ maintainers:
- name: Facundo Rodriguez
email: facundo@metacell.us
- name: Filippo Ledda
- email: filippo@metacell.us
\ No newline at end of file
+ email: filippo@metacell.us
+ - name: Zoran Sinnema
+ email: zoran@metacell.us
\ No newline at end of file
diff --git a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/NOTES.txt b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/NOTES.txt
index 012476ed..8f8d019d 100644
--- a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/NOTES.txt
+++ b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/NOTES.txt
@@ -4,8 +4,8 @@
https://{{ printf "%s" $.Values.domain }}
{{- range $app := .Values.apps }}
-{{- if and $app.subdomain }}
- https://{{ printf "%s.%s" $app.subdomain $.Values.domain }}
+{{- if and $app.harness.subdomain }}
+ https://{{ printf "%s.%s" $app.harness.subdomain $.Values.domain }}
{{- end}}
{{- end }}
{{- end }}
\ No newline at end of file
diff --git a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/_helpers.tpl b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/_helpers.tpl
index 033cc2c4..51122a8a 100644
--- a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/_helpers.tpl
+++ b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/_helpers.tpl
@@ -48,6 +48,7 @@ Add environmental variables to all containers
- name: {{ $pair.name | quote }}
value: {{ $pair.value | quote }}
{{- end }}
+{{- if .Values.apps.accounts }}
- name: CH_ACCOUNTS_CLIENT_SECRET
value: {{ .Values.apps.accounts.client.secret | quote }}
- name: CH_ACCOUNTS_REALM
@@ -59,6 +60,7 @@ Add environmental variables to all containers
- name: DOMAIN
value: {{ .Values.domain | quote }}
{{- end -}}
+{{- end -}}
{{/*
Add private environmental variables to all containers
*/}}
@@ -76,7 +78,7 @@ Defines docker registry
*/}}
{{- define "deploy_utils.registry" }}
{{- if not (eq .Values.registry.name "") }}
-{{- printf "%s/" .Values.registry.name }}
+{{- printf "%s" .Values.registry.name }}
{{- end }}
{{- end }}
@@ -105,8 +107,8 @@ hostAliases:
hostnames:
{{ printf "- %s" .Values.domain }}
{{- range $app := .Values.apps }}
- {{- if $app.subdomain }}
- {{ printf "- %s.%s" $app.subdomain $domain }}
+ {{- if $app.harness.subdomain }}
+ {{ printf "- %s.%s" $app.harness.subdomain $domain }}
{{- end }}
{{- end }}
{{- end }}
diff --git a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/auto-deployments.yaml b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/auto-deployments.yaml
index fcd561f0..d264f78f 100644
--- a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/auto-deployments.yaml
+++ b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/auto-deployments.yaml
@@ -2,17 +2,17 @@
apiVersion: apps/v1
kind: Deployment
metadata:
- name: {{ .app.name | quote }}
+ name: {{ .app.harness.deployment.name| quote }}
namespace: {{ .root.Values.namespace }}
labels:
- app: {{ .app.name | quote }}
-{{ include "deploy_utils.labels" .root | indent 4 }}
+ app: {{ .app.harness.deployment.name| quote }}
+{{- include "deploy_utils.labels" .root | indent 4 }}
spec:
- replicas: {{ .app.replicas | default 1 }}
+ replicas: {{ .app.harness.deployment.replicas | default 1 }}
selector:
matchLabels:
- app: {{ .app.name | quote }}
-{{ include "deploy_utils.labels" .root | indent 6 }}
+ app: {{ .app.harness.deployment.name| quote }}
+{{- include "deploy_utils.labels" .root | indent 6 }}
template:
metadata:
{{- if .app.harvest }}
@@ -21,41 +21,68 @@ spec:
metricbeat: "true"
{{- end }}
labels:
- app: {{ .app.name | quote }}
-{{ include "deploy_utils.labels" .root | indent 8 }}
+ app: {{ .app.harness.deployment.name| quote }}
+{{- include "deploy_utils.labels" .root | indent 8 }}
spec:
- {{ if .root.Values.registry.secret }}
+ {{- if and .root.Values.registry.secret (contains .root.Values.registry.name .app.harness.deployment.image) }}
imagePullSecrets:
- name: {{ .root.Values.registry.secret }}
{{- end }}
containers:
- - name: {{ .app.name | default "cloudharness-docs" | quote }}
- image: {{ .app.image }}
+ - name: {{ .app.harness.deployment.name| default "cloudharness-docs" | quote }}
+ image: {{ .app.harness.deployment.image }}
imagePullPolicy: {{ include "deploy_utils.pullpolicy" .root }}
env:
{{- include "deploy_utils.env" .root | nindent 8 }}
{{- include "deploy_utils.privenv" .root | nindent 8 }}
+ {{- if .app.harness.secrets }}
+ {{- range $secret := .app.harness.secrets }}
+ - name: {{ print $secret.name "-" $secret.key }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ print $secret.name }}
+ key: {{ print $secret.key }}
+ {{- end }}
+ {{- end }}
ports:
- - containerPort: {{ .app.port | default 8080 }}
+ - containerPort: {{ .app.harness.deployment.port | default 8080 }}
resources:
requests:
- memory: {{ .app.resources.requests.memory | default "32Mi" }}
- cpu: {{ .app.resources.requests.cpu | default "25m" }}
+ memory: {{ .app.harness.deployment.resources.requests.memory | default "32Mi" }}
+ cpu: {{ .app.harness.deployment.resources.requests.cpu | default "25m" }}
limits:
- memory: {{ .app.resources.limits.memory | default "64Mi" }}
- cpu: {{ .app.resources.limits.cpu | default "50m" }}
+ memory: {{ .app.harness.deployment.resources.limits.memory | default "64Mi" }}
+ cpu: {{ .app.harness.deployment.resources.limits.cpu | default "50m" }}
+ volumeMounts:
+ - name: cloudharness-allvalues
+ mountPath: /opt/cloudharness/resources/allvalues.yaml
+ subPath: allvalues.yaml
+ {{- if .app.harness.deployment.volume }}
+ - name: {{ .app.harness.deployment.volume.name }}
+ mountPath: {{ .app.harness.deployment.volume.mountpath }}
+ {{- end }}
+ volumes:
+ - name: cloudharness-allvalues
+ configMap:
+ name: cloudharness-allvalues
+ {{- if .app.harness.deployment.volume }}
+ - name: {{ .app.harness.deployment.volume.name }}
+ persistentVolumeClaim:
+ claimName: {{ .app.harness.deployment.volume.name }}
+ {{- end }}
+
---
{{- end }}
{{- range $app := .Values.apps }}
- {{- if and (hasKey $app "port") $app.autodeploy | default false }}
+ {{- if and (hasKey $app "port") $app.harness.deployment.auto | default false }}
---
- {{ include "deploy_utils.deployment" (dict "root" $ "app" $app) }}
+ {{- include "deploy_utils.deployment" (dict "root" $ "app" $app) }}
{{- end }}
{{- range $subapp := $app }}
{{- if contains "map" (typeOf $subapp) }}
- {{- if and (hasKey $subapp "port") $subapp.autodeploy | default false }}
+ {{- if hasKey $subapp "harness" }}
---
- {{ include "deploy_utils.deployment" (dict "root" $ "app" $subapp) }}
+ {{- include "deploy_utils.deployment" (dict "root" $ "app" $subapp) }}
{{- end }}
{{- end }}
{{- end }}
diff --git a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/auto-gatekeepers.yaml b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/auto-gatekeepers.yaml
index 00762314..3c4ab65e 100644
--- a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/auto-gatekeepers.yaml
+++ b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/auto-gatekeepers.yaml
@@ -1,88 +1,98 @@
{{/* Secured Services/Deployments */}}
+
{{- define "deploy_utils.securedservice" }}
+{{- $tls := not (not .root.Values.tls) }}
apiVersion: v1
kind: ConfigMap
metadata:
- name: "{{ .app.name }}-gk"
+ name: "{{ .app.harness.service.name }}-gk"
labels:
- app: "{{ .app.name }}-gk"
+ app: "{{ .app.harness.service.name }}-gk"
data:
proxy.yml: |-
verbose: true
- discovery-url: https://{{ .root.Values.apps.accounts.subdomain }}.{{ .root.Values.domain }}/auth/realms/{{ .root.Values.namespace }}
+ discovery-url: {{ ternary "https" "http" $tls}}://{{ .root.Values.apps.accounts.subdomain }}.{{ .root.Values.domain }}/auth/realms/{{ .root.Values.namespace }}
client-id: {{ .root.Values.apps.accounts.webclient.id | quote }}
client-secret: {{ .root.Values.apps.accounts.webclient.secret }}
- listen: 0.0.0.0:80
+ secure-cookie: {{ $tls }}
+ listen: 0.0.0.0:8080
enable-refresh-tokens: true
tls-cert:
tls-private-key:
- redirection-url: https://{{ .app.subdomain }}.{{ .root.Values.domain }}
+ redirection-url: {{ ternary "https" "http" $tls }}://{{ .app.harness.subdomain }}.{{ .root.Values.domain }}
encryption-key: AgXa7xRcoClDEU0ZDSH4X0XhL5Qy2Z2j
- upstream-url: http://{{ .app.name }}.{{ .app.namespace | default .root.Release.Namespace }}:{{ .app.port | default 80}}
+ upstream-url: http://{{ .app.harness.service.name }}.{{ .app.namespace | default .root.Release.Namespace }}:{{ .app.harness.service.port | default 80}}
scopes:
- vpn-user
+ {{ if .app.harness.secured }}
+ {{ with .app.harness.uri_role_mapping }}
resources:
- - uri: /*
- methods:
- - GET
- roles:
- - {{ .root.Values.apps.accounts.admin.role }}
+ {{. | toYaml | nindent 4 }}
+ {{- end }}
+ {{- end }}
+ {{ if or .root.Values.local (not $tls) }}
+ skip-openid-provider-tls-verify: true
+ {{- end }}
+ cacert.crt: {{ .files.Get "resources/certs/cacert.crt" | quote }}
---
apiVersion: v1
kind: Service
metadata:
- name: "{{ .app.name }}-gk"
+ name: "{{ .app.harness.service.name }}-gk"
labels:
- app: "{{ .app.name }}-gk"
+ app: "{{ .app.harness.service.name }}-gk"
spec:
ports:
- name: http
- port: 80
+ port: 8080
selector:
- app: "{{ .app.name }}-gk"
+ app: "{{ .app.harness.service.name }}-gk"
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
- name: "{{ .app.name }}-gk"
+ name: "{{ .app.harness.service.name }}-gk"
labels:
- app: "{{ .app.name }}-gk"
+ app: "{{ .app.harness.service.name }}-gk"
spec:
replicas: 1
selector:
matchLabels:
- app: "{{ .app.name }}-gk"
+ app: "{{ .app.harness.service.name }}-gk"
template:
metadata:
labels:
- app: "{{ .app.name }}-gk"
+ app: "{{ .app.harness.service.name }}-gk"
spec:
- {{ if .root.Values.registry.secret }}
- imagePullSecrets:
- - name: {{ .root.Values.registry.secret }}
- {{- end }}
{{ include "deploy_utils.etcHosts" .root | indent 6 }}
containers:
- - name: {{ .app.name | quote }}
- {{ if .root.Values.local }}
- image: {{ include "deploy_utils.registry" .root }}{{ .root.Values.apps.accounts.gatekeeper.image}}:{{ .root.Values.tag }}
- imagePullPolicy: {{ include "deploy_utils.pullpolicy" .root }}
- {{ else }}
- image: "keycloak/keycloak-gatekeeper:9.0.2"
+ - name: {{ .app.harness.service.name | quote }}
+ image: "quay.io/louketo/louketo-proxy:1.0.0"
imagePullPolicy: IfNotPresent
+ {{ if .root.Values.local }}
+ securityContext:
+ allowPrivilegeEscalation: false
+ runAsUser: 0
+ command:
+ - "/bin/bash"
+ - "-c"
+ args:
+ - "sleep 10 && /bin/update-ca-trust force enable && /bin/update-ca-trust && /opt/louketo/louketo-proxy"
{{- end }}
-
env:
- name: PROXY_CONFIG_FILE
value: /opt/proxy.yml
volumeMounts:
- - name: "{{ .app.name }}-gk-proxy-config"
+ - name: "{{ .app.harness.service.name }}-gk-proxy-config"
mountPath: /opt/proxy.yml
subPath: proxy.yml
+ - name: "{{ .app.harness.service.name }}-gk-proxy-config"
+ mountPath: /etc/pki/ca-trust/source/anchors/cacert.crt
+ subPath: cacert.crt
ports:
- name: http
- containerPort: {{ .root.Values.apps.accounts.port | default 8080 }}
+ containerPort: 8080
- name: https
containerPort: 8443
resources:
@@ -93,22 +103,25 @@ spec:
memory: "64Mi"
cpu: "100m"
volumes:
- - name: "{{ .app.name }}-gk-proxy-config"
+ - name: "{{ .app.harness.service.name }}-gk-proxy-config"
configMap:
- name: "{{ .app.name }}-gk"
+ name: "{{ .app.harness.service.name }}-gk"
---
{{- end }}
{{- if .Values.secured_gatekeepers }}
+{{ $files := .Files }}
{{- range $app := .Values.apps }}
- {{- if and (hasKey $app "port") ($app.secureme) }}
+ {{- if and (hasKey $app "port") ($app.harness.secured) }}
---
- {{ include "deploy_utils.securedservice" (dict "root" $ "app" $app) }}
+ {{ include "deploy_utils.securedservice" (dict "root" $ "app" $app "files" $files) }}
{{- end }}
{{- range $subapp := $app }}
{{- if contains "map" (typeOf $subapp) }}
- {{- if and (hasKey $subapp "port") $subapp.secureme }}
+ {{- if and (hasKey $subapp "harness.port") (hasKey $subapp "harness.secured") }}
+ {{- if $subapp.harness.secured }}
---
- {{ include "deploy_utils.securedservice" (dict "root" $ "app" $subapp) }}
+ {{ include "deploy_utils.securedservice" (dict "root" $ "app" $subapp "files" $files) }}
+ {{- end }}
{{- end }}
{{- end }}
{{- end }}
diff --git a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/auto-services.yaml b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/auto-services.yaml
index 15fd1345..1385bfed 100644
--- a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/auto-services.yaml
+++ b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/auto-services.yaml
@@ -1,35 +1,32 @@
{{/* Services */}}
{{- define "deploy_utils.service" }}
+---
apiVersion: v1
kind: Service
metadata:
- name: {{ .app.name | quote }}
+ name: {{ .app.harness.service.name | quote }}
labels:
- app: {{ .app.name | quote }}
+ app: {{ .app.harness.deployment.name | quote }}
{{ include "deploy_utils.labels" .root | indent 4 }}
spec:
selector:
- app: {{ .app.name | quote }}
+ app: {{ .app.harness.deployment.name| quote }}
ports:
- - port: {{ .app.port }}
+ - port: {{ .app.harness.service.port }}
name: http
+---
{{- end }}
-
-
{{- range $app := .Values.apps }}
- {{- if and (hasKey $app "port") ($app.autoservice | default true) }}
----
- {{ include "deploy_utils.service" (dict "root" $ "app" $app) }}
+ {{- if $app.harness.service.auto }}
+ {{ include "deploy_utils.service" (dict "root" $ "app" $app) }}
+ {{- end }}
{{- range $subapp := $app }}
{{- if contains "map" (typeOf $subapp) }}
- {{- if and (hasKey $subapp "port") ($subapp.autoservice | default false) }}
----
+ {{- if hasKey $subapp "harness"}}
+ {{- if and (hasKey $subapp.harness "service") $subapp.harness.service.auto }}
{{ include "deploy_utils.service" (dict "root" $ "app" $subapp) }}
{{- end }}
{{- end }}
- {{- end }}
-
{{- end }}
{{- end }}
-
-
+ {{- end }}
\ No newline at end of file
diff --git a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/auto-volumes.yaml b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/auto-volumes.yaml
new file mode 100644
index 00000000..0748002d
--- /dev/null
+++ b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/auto-volumes.yaml
@@ -0,0 +1,22 @@
+{{- define "deploy_utils.pvolume" }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{ .app.harness.deployment.volume.name }}
+ labels:
+ app: {{ .app.harness.deployment.name| quote }}
+spec:
+ storageClassName: standard
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .app.harness.deployment.volume.size }}
+---
+{{- end }}
+{{- range $app := .Values.apps }}
+ {{- if and $app.harness.deployment.auto $app.harness.deployment.volume }}
+---
+ {{- include "deploy_utils.pvolume" (dict "root" $ "app" $app) }}
+ {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/certs/letsencrypt.yaml b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/certs/letsencrypt.yaml
index 9e6357b9..9d00da02 100644
--- a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/certs/letsencrypt.yaml
+++ b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/certs/letsencrypt.yaml
@@ -2,15 +2,15 @@
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
- name: letsencrypt
+ name: {{ printf "%s-%s" "letsencrypt" .Values.namespace }}
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: {{ .Values.ingress.letsencrypt.email }}
privateKeySecretRef:
- name: letsencrypt
+ name: {{ .Values.tls }}
solvers:
- http01:
ingress:
ingressName: cloudharness-ingress
-{{ end }}
\ No newline at end of file
+{{ end }}
diff --git a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/configmap.yaml b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/configmap.yaml
new file mode 100644
index 00000000..fdb8975b
--- /dev/null
+++ b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/configmap.yaml
@@ -0,0 +1,9 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: cloudharness-allvalues
+ labels:
+ app: cloudharness
+data:
+ allvalues.yaml: |
+ {{- .Values | toYaml | nindent 4 }}
\ No newline at end of file
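
The `cloudharness-allvalues` ConfigMap above, mounted by the auto-deployments template at `/opt/cloudharness/resources/allvalues.yaml`, makes the full chart configuration readable from inside every auto-deployed container. A minimal sketch of reading it from application code, assuming a hypothetical app named `samples` (app keys are stored with `-` replaced by `_`, see `collect_helm_values` further below):

```python
# Illustrative only: read the mounted allvalues.yaml inside a container.
import yaml

ALLVALUES_PATH = "/opt/cloudharness/resources/allvalues.yaml"  # mount path from auto-deployments.yaml

with open(ALLVALUES_PATH) as f:
    allvalues = yaml.safe_load(f)

# 'samples' is a hypothetical application name used for the example.
samples_harness = allvalues["apps"]["samples"]["harness"]
print(samples_harness["deployment"]["port"], samples_harness["subdomain"])
```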
diff --git a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/ingress.yaml b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/ingress.yaml
index 204778a7..e5e3e243 100644
--- a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/ingress.yaml
+++ b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/ingress.yaml
@@ -1,6 +1,6 @@
{{- if .Values.ingress.enabled }}
{{ $domain := .Values.domain }}
-{{ $tls := and .Values.tls }}
+{{ $tls := not (not .Values.tls) }}
{{ $secured_gatekeepers := and .Values.secured_gatekeepers }}
apiVersion: extensions/v1beta1
kind: Ingress
@@ -10,38 +10,40 @@ metadata:
{{- if not .Values.local }}
kubernetes.io/tls-acme: 'true'
kubernetes.io/ingress.class: nginx
- cert-manager.io/cluster-issuer: letsencrypt
+ cert-manager.io/cluster-issuer: {{ printf "%s-%s" "letsencrypt" .Values.namespace }}
{{- end }}
- nginx.ingress.kubernetes.io/ssl-redirect: 'false'
+ nginx.ingress.kubernetes.io/ssl-redirect: {{ (and $tls .Values.ingress.ssl_redirect) | quote }}
+ nginx.ingress.kubernetes.io/proxy-body-size: '10m'
+ nginx.ingress.kubernetes.io/from-to-www-redirect: 'true'
spec:
rules:
{{- range $app := .Values.apps }}
- {{- if $app.domain }}
- - host: {{ $app.domain | quote }}
+ {{- if $app.harness.domain }}
+ - host: {{ $domain | quote }}
http:
paths:
- path: /
backend:
- serviceName: {{- if (and $app.secureme $secured_gatekeepers) }} "{{ $app.name }}-gk" {{- else }} {{ $app.name | quote }}{{- end }}
- servicePort: {{- if (and $app.secureme $secured_gatekeepers) }} 80 {{- else }} {{ $app.port | default 80 }}{{- end }}
- {{- else if $app.subdomain }}
- - host: {{ printf "%s.%s" $app.subdomain $domain | quote }}
+ serviceName: {{- if (and $app.harness.secured $secured_gatekeepers) }} "{{ $app.harness.service.name }}-gk" {{- else }} {{ $app.harness.service.name | quote }}{{- end }}
+ servicePort: {{- if (and $app.harness.secured $secured_gatekeepers) }} 8080 {{- else }} {{ $app.harness.service.port | default 80 }}{{- end }}
+ {{- else if $app.harness.subdomain }}
+ - host: {{ printf "%s.%s" $app.harness.subdomain $domain | quote }}
http:
paths:
- path: /
backend:
- serviceName: {{- if (and $app.secureme $secured_gatekeepers) }} "{{ $app.name }}-gk" {{- else }} {{ $app.name | quote }}{{- end }}
- servicePort: {{- if (and $app.secureme $secured_gatekeepers) }} 80 {{- else }} {{ $app.port | default 80 }}{{- end }}
+ serviceName: {{- if (and $app.harness.secured $secured_gatekeepers) }} "{{ $app.harness.service.name }}-gk" {{- else }} {{ $app.harness.service.name | quote }}{{- end }}
+ servicePort: {{- if (and $app.harness.secured $secured_gatekeepers) }} 8080 {{- else }} {{ $app.harness.service.port | default 80 }}{{- end }}
{{- range $subapp := $app }}
- {{- if contains "map" (typeOf $subapp) }}
- {{- if and $subapp (hasKey $subapp "subdomain") }}
- - host: {{ printf "%s.%s.%s" $subapp.subdomain $app.subdomain $domain | quote }}
+ {{- if contains "map" (typeOf $subapp) }}
+ {{- if and $subapp (hasKey $subapp "harness.subdomain") }}
+ - host: {{ printf "%s.%s.%s" $subapp.harness.subdomain $app.harness.subdomain $domain | quote }}
http:
paths:
- path: /
backend:
- serviceName: {{- if (and $app.secureme $secured_gatekeepers) }} "{{ $subapp.name }}-gk" {{- else }} {{ $subapp.name | quote }}{{- end }}
- servicePort: {{- if (and $app.secureme $secured_gatekeepers) }} 80 {{- else }} {{ $subapp.port | default 80 }}{{- end }}
+ serviceName: {{- if (and $app.harness.secured $secured_gatekeepers) }} "{{ $subapp.harness.service.name }}-gk" {{- else }} {{ $subapp.harness.service.name | quote }}{{- end }}
+ servicePort: {{- if (and $app.harness.secured $secured_gatekeepers) }} 8080 {{- else }} {{ $subapp.harness.service.port | default 80 }}{{- end }}
{{- end }}
{{- end }}
{{- end }}
@@ -51,10 +53,10 @@ spec:
tls:
- hosts:
{{- range $app := .Values.apps }}
- {{- if $app.subdomain }}
- - {{ printf "%s.%s" $app.subdomain $domain | quote }}
+ {{- if $app.harness.subdomain }}
+ - {{ printf "%s.%s" $app.harness.subdomain $domain | quote }}
{{- else if $app.domain }}
- - {{ $app.domain | quote }}
+ - {{ $domain | quote }}
{{- end }}
{{- end }}
secretName: {{ $tls | quote }}
diff --git a/utilities/cloudharness_utilities/deployment-configuration/helm/templates/tls-secret.yaml b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/tls-secret.yaml
new file mode 100644
index 00000000..9c08fb4b
--- /dev/null
+++ b/utilities/cloudharness_utilities/deployment-configuration/helm/templates/tls-secret.yaml
@@ -0,0 +1,13 @@
+{{ if and .Values.local .Values.tls }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Values.tls | quote }}
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ .Files.Get "resources/certs/tls.crt" | b64enc | quote }}
+ tls.key: {{ .Files.Get "resources/certs/tls.key" | b64enc | quote }}
+---
+{{- end }}
+
+
diff --git a/utilities/cloudharness_utilities/deployment-configuration/value-template.yaml b/utilities/cloudharness_utilities/deployment-configuration/value-template.yaml
index 6f4d9c66..0b2934a4 100644
--- a/utilities/cloudharness_utilities/deployment-configuration/value-template.yaml
+++ b/utilities/cloudharness_utilities/deployment-configuration/value-template.yaml
@@ -1,12 +1,26 @@
-autoservice: true
-name: CHANGE ME
-image:
- name: CHANGE ME
- tag: ${{CF_REVISION}}
-resources:
- requests:
- memory: "32Mi"
- cpu: "25m"
- limits:
- memory: "500Mi"
- cpu: "500m"
\ No newline at end of file
+harness:
+ name:
+ subdomain:
+ domain:
+ secured: false
+ uri_role_mapping:
+ - uri: /*
+ roles:
+ - administrator
+ deployment:
+ auto: false
+ replicas: 1
+ image:
+ name:
+ port: 8081
+ resources:
+ requests:
+ memory: "32Mi"
+ cpu: "25m"
+ limits:
+ memory: "500Mi"
+ cpu: "500m"
+ service:
+ auto: true
+ name:
+ port: 80
\ No newline at end of file
diff --git a/utilities/cloudharness_utilities/deployment-configuration/values-template.yaml b/utilities/cloudharness_utilities/deployment-configuration/values-template.yaml
index c494ad18..913c9221 100644
--- a/utilities/cloudharness_utilities/deployment-configuration/values-template.yaml
+++ b/utilities/cloudharness_utilities/deployment-configuration/values-template.yaml
@@ -6,7 +6,7 @@ domain: ${{DOMAIN}}
namespace: ch
registry:
name: "localhost:5000"
- secret: ${{REGISTRY_SECRET}}
+ secret:
tag: latest
serviceaccount: argo-workflows
apps:
@@ -22,5 +22,6 @@ privenv:
ingress:
enabled: true
name: cloudharness-ingress
+ ssl_redirect: true
letsencrypt:
email: facundo@metacell.us
\ No newline at end of file
diff --git a/utilities/cloudharness_utilities/helm.py b/utilities/cloudharness_utilities/helm.py
index 64903d70..b3ae2dd5 100644
--- a/utilities/cloudharness_utilities/helm.py
+++ b/utilities/cloudharness_utilities/helm.py
@@ -4,33 +4,50 @@
import yaml
import os
import shutil
+import sys
import logging
import subprocess
-
-from .constants import VALUES_MANUAL_PATH, VALUE_TEMPLATE_PATH, HELM_CHART_PATH, APPS_PATH, HELM_PATH, HERE, DEPLOYMENT_CONFIGURATION_PATH
-from .utils import get_cluster_ip, get_image_name, env_variable, get_sub_paths, image_name_from_docker_path, \
+import tarfile
+from docker import from_env as DockerClient
+from pathlib import Path
+from .constants import VALUES_MANUAL_PATH, VALUE_TEMPLATE_PATH, HELM_CHART_PATH, APPS_PATH, HELM_PATH, HERE, \
+ DEPLOYMENT_CONFIGURATION_PATH
+from .utils import get_cluster_ip, get_image_name, env_variable, get_sub_paths, app_name_from_path, \
get_template, merge_configuration_directories, merge_to_yaml_file, dict_merge
+KEY_HARNESS = 'harness'
+KEY_SERVICE = 'service'
+KEY_DEPLOYMENT = 'deployment'
+KEY_APPS = 'apps'
-
-def create_helm_chart(root_paths, tag='latest', registry='', local=True, domain=None, exclude=(), secured=True, output_path='./deployment'):
+def create_helm_chart(root_paths, tag='latest', registry='', local=True, domain=None, exclude=(), secured=True,
+ output_path='./deployment', include=None, registry_secret=None, tls=True):
"""
Creates values file for the helm chart
"""
dest_deployment_path = os.path.join(output_path, HELM_CHART_PATH)
+ if os.path.exists(dest_deployment_path):
+ shutil.rmtree(dest_deployment_path)
# Initialize with default
copy_merge_base_deployment(dest_deployment_path, os.path.join(HERE, DEPLOYMENT_CONFIGURATION_PATH, HELM_PATH))
- helm_values = collect_helm_values(HERE, tag=tag, registry=registry, exclude=exclude)
+ helm_values = collect_helm_values(HERE, tag=tag, registry=registry, exclude=exclude, include=include)
# Override for every cloudharness scaffolding
for root_path in root_paths:
- copy_merge_base_deployment(dest_helm_chart_path=dest_deployment_path, base_helm_chart=os.path.join(root_path, DEPLOYMENT_CONFIGURATION_PATH, HELM_PATH))
- collect_apps_helm_templates(root_path, exclude=exclude, dest_helm_chart_path=dest_deployment_path)
- helm_values = dict_merge(helm_values, collect_helm_values(root_path, tag=tag, registry=registry, exclude=exclude))
-
- finish_helm_values(values=helm_values, tag=tag, registry=registry, local=local, domain=domain, secured=secured)
+ copy_merge_base_deployment(dest_helm_chart_path=dest_deployment_path,
+ base_helm_chart=os.path.join(root_path, DEPLOYMENT_CONFIGURATION_PATH, HELM_PATH))
+ collect_apps_helm_templates(root_path, exclude=exclude, include=include,
+ dest_helm_chart_path=dest_deployment_path)
+ helm_values = dict_merge(helm_values,
+ collect_helm_values(root_path, tag=tag, registry=registry, exclude=exclude,
+ include=include))
+
+ create_tls_certificate(local, domain, tls, output_path, helm_values)
+
+ finish_helm_values(values=helm_values, tag=tag, registry=registry, local=local, domain=domain, secured=secured,
+ registry_secret=registry_secret, tls=tls)
# Save values file for manual helm chart
merged_values = merge_to_yaml_file(helm_values, os.path.join(dest_deployment_path, VALUES_MANUAL_PATH))
return merged_values
@@ -40,7 +57,7 @@ def merge_helm_chart(source_templates_path, dest_helm_chart_path=HELM_CHART_PATH
pass
-def collect_apps_helm_templates(search_root, dest_helm_chart_path, exclude=()):
+def collect_apps_helm_templates(search_root, dest_helm_chart_path, exclude=(), include=None):
"""
Searches recursively for helm templates inside the applications and collects the templates in the destination
@@ -52,16 +69,16 @@ def collect_apps_helm_templates(search_root, dest_helm_chart_path, exclude=()):
app_base_path = os.path.join(search_root, APPS_PATH)
for app_path in get_sub_paths(app_base_path):
- app_name = image_name_from_docker_path(os.path.relpath(app_path, app_base_path))
- if app_name in exclude:
+ app_name = app_name_from_path(os.path.relpath(app_path, app_base_path))
+ if app_name in exclude or (include and not any(inc in app_name for inc in include)):
continue
template_dir = os.path.join(app_path, 'deploy/templates')
if os.path.exists(template_dir):
dest_dir = os.path.join(dest_helm_chart_path, 'templates', app_name)
- logging.info(f"Collecting templates for application {app_name} to {dest_dir}")
+ logging.info("Collecting templates for application %s to %s", app_name, dest_dir)
if os.path.exists(dest_dir):
- logging.warning("Merging/overriding all files in directory " + dest_dir)
+ logging.warning("Merging/overriding all files in directory %s", dest_dir)
merge_configuration_directories(template_dir, dest_dir)
else:
shutil.copytree(template_dir, dest_dir)
@@ -69,7 +86,7 @@ def collect_apps_helm_templates(search_root, dest_helm_chart_path, exclude=()):
if os.path.exists(resources_dir):
dest_dir = os.path.join(dest_helm_chart_path, 'resources', app_name)
- logging.info(f"Collecting resources for application {app_name} to {dest_dir}")
+ logging.info("Collecting resources for application %s to %s", app_name, dest_dir)
if os.path.exists(dest_dir):
shutil.rmtree(dest_dir)
shutil.copytree(resources_dir, dest_dir)
@@ -79,14 +96,14 @@ def copy_merge_base_deployment(dest_helm_chart_path, base_helm_chart):
if not os.path.exists(base_helm_chart):
return
if os.path.exists(dest_helm_chart_path):
- logging.info("Merging/overriding all files in directory {}".format(dest_helm_chart_path))
+ logging.info("Merging/overriding all files in directory %s", dest_helm_chart_path)
merge_configuration_directories(base_helm_chart, dest_helm_chart_path)
else:
- logging.info("Copying base deployment chart from {} to {}".format(base_helm_chart, dest_helm_chart_path))
+ logging.info("Copying base deployment chart from %s to %s", base_helm_chart, dest_helm_chart_path)
shutil.copytree(base_helm_chart, dest_helm_chart_path)
-def collect_helm_values(deployment_root, exclude=(), tag='latest', registry=''):
+def collect_helm_values(deployment_root, exclude=(), include=None, tag='latest', registry=''):
"""
Creates helm values from a cloudharness deployment scaffolding
"""
@@ -98,22 +115,23 @@ def collect_helm_values(deployment_root, exclude=(), tag='latest', registry=''):
else:
values = get_template(values_template_path)
- values['apps'] = {}
+ values[KEY_APPS] = {}
app_base_path = os.path.join(deployment_root, APPS_PATH)
for app_path in get_sub_paths(app_base_path):
- app_name = image_name_from_docker_path(os.path.relpath(app_path, app_base_path))
+ app_name = app_name_from_path(os.path.relpath(app_path, app_base_path))
- if app_name in exclude:
+ if app_name in exclude or (include and not any(inc in app_name for inc in include)):
continue
- app_values = create_values_spec(app_name, app_path, tag=tag, registry=registry, template_path=value_spec_template_path)
- values['apps'][app_name.replace('-', '_')] = app_values
+ app_values = create_values_spec(app_name, app_path, tag=tag, registry=registry,
+ template_path=value_spec_template_path)
+ values[KEY_APPS][app_name.replace('-', '_')] = app_values
return values
-def finish_helm_values(values, tag='latest', registry='', local=True, domain=None, secured=True):
+def finish_helm_values(values, tag='latest', registry='', local=True, domain=None, secured=True, registry_secret=None, tls=True):
"""
Sets default overridden values
"""
@@ -121,9 +139,13 @@ def finish_helm_values(values, tag='latest', registry='', local=True, domain=Non
logging.info(f"Registry set: {registry}")
if local:
values['registry']['secret'] = ''
- values['registry']['name'] = registry # Otherwise leave default for codefresh
- values['tag'] = tag # Otherwise leave default for codefresh
+ if registry_secret:
+ logging.info(f"Registry secret set")
+ values['registry']['name'] = registry
+ values['registry']['secret'] = registry_secret
+ values['tag'] = tag
values['secured_gatekeepers'] = secured
+ values['ingress']['ssl_redirect'] = values['ingress']['ssl_redirect'] and tls
if domain:
values['domain'] = domain
@@ -139,22 +161,76 @@ def finish_helm_values(values, tag='latest', registry='', local=True, domain=Non
create_env_variables(values)
return values
+
+def values_from_legacy(values):
+ harness = values[KEY_HARNESS]
+
+ if 'name' in values:
+ harness['name'] = values['name']
+ if 'subdomain' in values:
+ harness['subdomain'] = values['subdomain']
+ if 'autodeploy' in values:
+ harness[KEY_DEPLOYMENT]['auto'] = values['autodeploy']
+ if 'autoservice' in values:
+ harness[KEY_SERVICE]['auto'] = values['autoservice']
+ if 'secureme' in values:
+ harness['secured'] = values['secureme']
+ if 'resources' in values:
+ harness[KEY_DEPLOYMENT]['resources'].update(values['resources'])
+ if 'replicas' in values:
+ harness[KEY_DEPLOYMENT]['replicas'] = values['replicas']
+ if 'image' in values:
+ harness[KEY_DEPLOYMENT]['image'] = values['image']
+ if 'port' in values:
+ harness[KEY_DEPLOYMENT]['port'] = values['port']
+ harness[KEY_SERVICE]['port'] = values['port']
+
+
+def values_set_legacy(values):
+ harness = values[KEY_HARNESS]
+ if harness[KEY_DEPLOYMENT]['image']:
+ values['image'] = harness[KEY_DEPLOYMENT]['image']
+
+ values['name'] = harness['name']
+ if harness[KEY_DEPLOYMENT]['port']:
+ values['port'] = harness[KEY_DEPLOYMENT]['port']
+ values['resources'] = harness[KEY_DEPLOYMENT]['resources']
+
+
def create_values_spec(app_name, app_path, tag=None, registry='', template_path=VALUE_TEMPLATE_PATH):
logging.info('Generating values script for ' + app_name)
- values = get_template(template_path)
- if registry and registry[-1] != '/':
- registry = registry + '/'
- values['name'] = app_name
-
- values['image'] = registry + get_image_name(app_name) + f':{tag}' if tag else ''
+ values_default = get_template(template_path)
specific_template_path = os.path.join(app_path, 'deploy', 'values.yaml')
if os.path.exists(specific_template_path):
logging.info("Specific values template found: " + specific_template_path)
with open(specific_template_path) as f:
values_specific = yaml.safe_load(f)
- values.update(values_specific)
+ values = dict_merge(values_default, values_specific)
+ else:
+ values = values_default
+
+ values_from_legacy(values)
+ harness = values[KEY_HARNESS]
+
+ if not harness['name']:
+ harness['name'] = app_name
+ if not harness[KEY_SERVICE]['name']:
+ harness[KEY_SERVICE]['name'] = app_name
+ if not harness[KEY_DEPLOYMENT]['name']:
+ harness[KEY_DEPLOYMENT]['name'] = app_name
+ if not harness[KEY_DEPLOYMENT]['image']:
+ if registry and registry[-1] != '/':
+ registry = registry + '/'
+        harness[KEY_DEPLOYMENT]['image'] = registry + get_image_name(app_name) + (f':{tag}' if tag else '')
+
+ values_set_legacy(values)
+
+ for k in values:
+ if isinstance(values[k], dict) and KEY_HARNESS in values[k]:
+ values[k][KEY_HARNESS] = dict_merge(values[k][KEY_HARNESS], values_default[KEY_HARNESS])
+
return values
@@ -171,32 +247,110 @@ def extract_env_variables_from_values(values, envs=tuple(), prefix=''):
def create_env_variables(values):
- for app_name, value in values['apps'].items():
- values['env'].extend(extract_env_variables_from_values(value, prefix='CH_' + app_name))
+ for app_name, value in values[KEY_APPS].items():
+ if KEY_HARNESS in value:
+ values['env'].extend(extract_env_variables_from_values(value[KEY_HARNESS], prefix='CH_' + app_name))
values['env'].append(env_variable('CH_DOMAIN', values['domain']))
values['env'].append(env_variable('CH_IMAGE_REGISTRY', values['registry']['name']))
values['env'].append(env_variable('CH_IMAGE_TAG', values['tag']))
def hosts_info(values):
-
domain = values['domain']
namespace = values['namespace']
- subdomains = (app['subdomain'] for app in values['apps'].values() if 'subdomain' in app and app['subdomain'])
+ subdomains = (app[KEY_HARNESS]['subdomain'] for app in values[KEY_APPS].values() if
+ KEY_HARNESS in app and app[KEY_HARNESS]['subdomain'])
try:
ip = get_cluster_ip()
except:
+ logging.warning('Cannot get cluster ip')
return
- logging.info("\nTo test locally, update your hosts file" + f"\n{ip}\t{' '.join(sd + '.' + domain for sd in subdomains)}")
+ logging.info(
+ "\nTo test locally, update your hosts file" + f"\n{ip}\t{' '.join(sd + '.' + domain for sd in subdomains)}")
- services = (app['name'].replace("-", "_") for app in values['apps'].values() if 'name' in app)
+ deployments = (app[KEY_HARNESS][KEY_DEPLOYMENT]['name'] for app in values[KEY_APPS].values() if KEY_HARNESS in app)
logging.info("\nTo run locally some apps, also those references may be needed")
- for appname in values['apps']:
- app = values['apps'][appname]
+ for appname in values[KEY_APPS]:
+ app = values[KEY_APPS][appname]
if 'name' not in app or 'port' not in app: continue
print(
"kubectl port-forward -n {namespace} deployment/{app} {port}:{port}".format(
app=appname, port=app['port'], namespace=namespace))
- print(f"127.0.0.1\t{' '.join(s + '.cloudharness' for s in services)}")
+ print(f"127.0.0.1\t{' '.join(s + '.cloudharness' for s in deployments)}")
+
+
+def create_tls_certificate(local, domain, tls, output_path, helm_values):
+
+ if not tls:
+ helm_values['tls'] = None
+ return
+ helm_values['tls'] = domain.replace(".", "-") + "-tls"
+ if not local:
+ return
+
+ HERE = os.path.dirname(os.path.realpath(__file__)).replace(os.path.sep, '/')
+ ROOT = os.path.dirname(os.path.dirname(HERE)).replace(os.path.sep, '/')
+
+ bootstrap_file_path = os.path.join(ROOT, 'utilities', 'cloudharness_utilities', 'scripts')
+ bootstrap_file = 'bootstrap.sh'
+ certs_parent_folder_path = os.path.join(output_path, 'helm', 'resources')
+ certs_folder_path = os.path.join(certs_parent_folder_path, 'certs')
+
+ if os.path.exists(os.path.join(certs_folder_path)):
+ # don't overwrite the certificate if it exists
+ return
+
+ try:
+ client = DockerClient()
+ client.ping()
+ except:
+ raise ConnectionRefusedError(
+            '\n\nIs docker running? Run "eval $(minikube docker-env)" if you are using minikube...')
+
+ # Create CA and sign cert for domain
+ container = client.containers.run(image='frapsoft/openssl',
+ command=f'sleep 60',
+ entrypoint="",
+ detach=True,
+ environment=[f"DOMAIN={domain}"],
+ )
+
+ container.exec_run('mkdir -p /mnt/vol1')
+ container.exec_run('mkdir -p /mnt/certs')
+
+ # copy bootstrap file
+ cur_dir = os.getcwd()
+ os.chdir(bootstrap_file_path)
+ tar = tarfile.open(bootstrap_file + '.tar', mode='w')
+ try:
+ tar.add(bootstrap_file)
+ finally:
+ tar.close()
+ data = open(bootstrap_file + '.tar', 'rb').read()
+ container.put_archive('/mnt/vol1', data)
+ os.chdir(cur_dir)
+ container.exec_run(f'tar x {bootstrap_file}.tar', workdir='/mnt/vol1')
+
+ # exec bootstrap file
+ container.exec_run(f'/bin/ash /mnt/vol1/{bootstrap_file}')
+
+ # retrieve the certs from the container
+ bits, stat = container.get_archive('/mnt/certs')
+ if not os.path.exists(certs_folder_path):
+ os.makedirs(certs_folder_path)
+ f = open(f'{certs_parent_folder_path}/certs.tar', 'wb')
+ for chunk in bits:
+ f.write(chunk)
+ f.close()
+ cf = tarfile.open(f'{certs_parent_folder_path}/certs.tar')
+ cf.extractall(path=certs_parent_folder_path)
+
+ logs = container.logs()
+ logging.info(f'openssl container logs: {logs}')
+
+ # stop the container
+ container.kill()
+
+ logging.info("Created certificates for local deployment")
diff --git a/utilities/cloudharness_utilities/openapi.py b/utilities/cloudharness_utilities/openapi.py
index 1ea3452f..276e7841 100644
--- a/utilities/cloudharness_utilities/openapi.py
+++ b/utilities/cloudharness_utilities/openapi.py
@@ -2,17 +2,20 @@
import subprocess
import sys
+import shutil
import json
import glob
import urllib.request
from cloudharness_utilities import HERE
import logging
+
CODEGEN = os.path.join(HERE, 'bin', 'openapi-generator-cli.jar')
APPLICATIONS_SRC_PATH = os.path.join('applications')
LIB_NAME = 'cloudharness_cli'
OPENAPI_GEN_URL = 'https://repo1.maven.org/maven2/org/openapitools/openapi-generator-cli/4.3.0/openapi-generator-cli-4.3.0.jar'
+
def generate_server(app_path):
openapi_dir = os.path.join(app_path, 'api')
openapi_file = glob.glob(os.path.join(openapi_dir, '*.yaml'))[0]
@@ -20,16 +23,18 @@ def generate_server(app_path):
os.system(command)
-def generate_client(module, openapi_file, CLIENT_SRC_PATH):
- with open('config-client.json', 'w') as f:
- f.write(json.dumps(dict(packageName=f"{LIB_NAME}.{module}")))
+def generate_python_client(module, openapi_file, client_src_path, lib_name=LIB_NAME):
+ config_path = os.path.join(os.path.dirname(openapi_file), 'config.json')
+
+ module = module.replace('-', '_')
+ with open(config_path, 'w') as f:
+ f.write(json.dumps(dict(packageName=f"{lib_name}.{module}")))
command = f"java -jar {CODEGEN} generate " \
f"-i {openapi_file} " \
f"-g python " \
- f"-o {CLIENT_SRC_PATH}/tmp-{module} " \
- f"-c config-client.json"
+ f"-o {client_src_path}/tmp-{module} " \
+ f"-c {config_path}"
os.system(command)
- os.remove('config-client.json')
def get_dependencies():
@@ -50,6 +55,3 @@ def get_dependencies():
if not os.path.exists(cdir):
os.makedirs(cdir)
urllib.request.urlretrieve(OPENAPI_GEN_URL, CODEGEN)
-
-
-get_dependencies()
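
With the refactoring above, the client generator can also be driven directly from Python. A hedged usage sketch (application name and paths are illustrative, not taken from the repository):

```python
from cloudharness_utilities.openapi import get_dependencies, generate_python_client

get_dependencies()  # downloads openapi-generator-cli.jar on first use
generate_python_client(
    module='samples',                                      # hypothetical application name
    openapi_file='applications/samples/api/openapi.yaml',  # hypothetical spec path
    client_src_path='client/cloudharness_cli',
)
```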
diff --git a/utilities/cloudharness_utilities/scripts/bootstrap.sh b/utilities/cloudharness_utilities/scripts/bootstrap.sh
new file mode 100755
index 00000000..0cb0d1fd
--- /dev/null
+++ b/utilities/cloudharness_utilities/scripts/bootstrap.sh
@@ -0,0 +1,34 @@
+# Create CA private key and certificate
+openssl req -x509 -newkey rsa:4096 -nodes -subj "/C=US/ST=CA/O=Acme, Inc./CN=MetaCell" -sha256 -keyout rootCA.key -days 1024 -out rootCA.crt
+
+# template cnf
+cat > server.cnf <<EOF
[... rest of bootstrap.sh and the header of the utilities/cloudharness_utilities/utils.py hunk are missing from this excerpt ...]
+def merge_app_directories(root_paths, destination) -> None:
+ """ Merge directories if they refer to the same application
+
+ Directories are merged in the destination from the root_paths list. The latter overrides the former.
+ Yaml files are merged, other files are overwritten.
+ """
+ if not os.path.exists(destination):
+ os.makedirs(destination)
+ else:
+ shutil.rmtree(destination)
+
+ for rpath in root_paths:
+ merge_configuration_directories(os.path.join(rpath, BASE_IMAGES_PATH),
+ os.path.join(destination, BASE_IMAGES_PATH))
+ merge_configuration_directories(os.path.join(rpath, STATIC_IMAGES_PATH),
+ os.path.join(destination, STATIC_IMAGES_PATH))
+ merge_configuration_directories(os.path.join(rpath, APPS_PATH),
+ os.path.join(destination, APPS_PATH))
+ merge_configuration_directories(os.path.join(rpath, 'libraries'),
+ os.path.join(destination, 'libraries'))
+ merge_configuration_directories(os.path.join(rpath, 'client'),
+ os.path.join(destination, 'client'))
+ merge_configuration_directories(os.path.join(rpath, 'deployment-configuration'),
+ os.path.join(destination, 'deployment-configuration'))
\ No newline at end of file
diff --git a/utilities/harness-codefresh b/utilities/harness-codefresh
index 37e6425c..a58fb841 100644
--- a/utilities/harness-codefresh
+++ b/utilities/harness-codefresh
@@ -15,14 +15,18 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Walk filesystem inside ./applications and define build and deploy scripts.')
- parser.add_argument('path', metavar='path', default=ROOT, type=str,
- help='Base path of the application.')
+ parser.add_argument('paths', metavar='paths', default=[ROOT], type=str, nargs='*',
+ help='Base paths to start looking for applications. The paths will be processed '
+                             'in the given order, thus determining the build and override precedence.')
+ parser.add_argument('-i', '--include', dest='include', action="append", default=[],
+                        help='Specify the applications to include, excluding all others. '
+                             'Omit to include all applications.')
+
args, unknown = parser.parse_known_args(sys.argv[1:])
- root_path = os.path.join(os.getcwd(), args.path) if not os.path.isabs(args.path) else args.path
if unknown:
print('There are unknown args. Make sure to call the script with the accepted args. Try --help')
print(f'unknown: {unknown}')
else:
- create_codefresh_deployment_scripts(root_path)
+ create_codefresh_deployment_scripts(args.paths, include=args.include)
diff --git a/utilities/harness-deployment b/utilities/harness-deployment
index 9364088a..b20ef64b 100644
--- a/utilities/harness-deployment
+++ b/utilities/harness-deployment
@@ -4,8 +4,9 @@ import sys
import os
from cloudharness_utilities.build import Builder
-from cloudharness_utilities.helm import collect_apps_helm_templates, create_helm_chart, hosts_info
-
+from cloudharness_utilities.utils import merge_app_directories
+from cloudharness_utilities.helm import create_helm_chart, hosts_info
+from cloudharness_utilities.codefresh import create_codefresh_deployment_scripts
HERE = os.path.dirname(os.path.realpath(__file__)).replace(os.path.sep, '/')
ROOT = os.path.dirname(os.path.dirname(HERE)).replace(os.path.sep, '/')
@@ -17,16 +18,23 @@ if __name__ == "__main__":
description='Walk filesystem inside ./applications and define build and deploy scripts.')
parser.add_argument('paths', metavar='paths', default=[ROOT], type=str, nargs='*',
- help='Base paths to start looking for applications. The paths will be processed in the given order.')
+ help='Base paths to start looking for applications. The paths will be processed '
+                             'in the given order, thus determining the build and override precedence.')
- parser.add_argument('-i', '--image', dest='image', action="append", default=[],
- help='Specify the images to build. Omit to build images for all Docker files.')
+ parser.add_argument('-i', '--include', dest='include', action="append", default=[],
+                        help='Specify the applications to include, excluding all others. '
+                             'Omit to include all applications.')
parser.add_argument('-t', '--tag', dest='tag', action="store", default='latest',
help='Add a tag with the current version (default `latest`)')
+ parser.add_argument('-n', '--namespace', dest='namespace', action="store", default='default',
+ help='Specify the namespace of the deployment (default `default`)')
+
parser.add_argument('-r', '--registry', dest='registry', action="store", default='',
help='Specify image registry prefix')
+ parser.add_argument('-rs', '--registry-secret', dest='registry_secret', action="store", default='',
+ help='Specify image registry secret')
parser.add_argument('-o', '--output', dest='output_path', action="store", default='./deployment',
help='Specify helm chart base path (default `./deployment)`')
parser.add_argument('-b', '--build', dest='build', action="store_true",
@@ -42,6 +50,13 @@ if __name__ == "__main__":
help='Disable secured gatekeepers access')
parser.add_argument('-e', '--exclude', dest='exclude', action="append", default=(),
help='Specify application to exclude from the deployment')
+
+ parser.add_argument('-m', '--merge', dest='merge', action="store", default=None,
+ help='Merge application folders and build in the given directory')
+
+ parser.add_argument('-dtls', '--disable-tls', dest='no_tls', action="store_true",
+ help='Disable tls (serve on http)')
+
args, unknown = parser.parse_known_args(sys.argv[1:])
root_paths = [os.path.join(os.getcwd(), path) for path in args.paths]
@@ -49,9 +64,28 @@ if __name__ == "__main__":
print('There are unknown args. Make sure to call the script with the accepted args. Try --help')
print(f'unknown: {unknown}')
else:
+
+ builder = Builder(root_paths, args.include, tag=args.tag, registry=args.registry, interactive=args.build_interactive,
+ exclude=args.exclude, namespace=args.namespace, domain=args.domain)
+
+ if args.merge:
+ merge_app_directories(builder.root_paths, destination=args.merge)
+ builder.root_paths = [args.merge]
if args.build or args.build_interactive:
- Builder(root_paths, args.image, tag=args.tag, registry=args.registry, interactive=args.build_interactive, exclude=args.exclude).run()
+ builder.run()
- values_manual_deploy = create_helm_chart(root_paths, tag=args.tag, registry=args.registry, domain=args.domain, local=args.local, secured=not args.unsecured, output_path=args.output_path, exclude=args.exclude)
- if args.local:
- hosts_info(values_manual_deploy)
+ values_manual_deploy = create_helm_chart(
+ root_paths,
+ tag=args.tag,
+ registry=args.registry,
+ domain=args.domain,
+ local=args.local,
+ secured=not args.unsecured,
+ output_path=args.output_path,
+ exclude=args.exclude,
+ include=args.include,
+ registry_secret=args.registry_secret,
+ tls=not args.no_tls
+ )
+ create_codefresh_deployment_scripts(builder.root_paths, include=args.include)
+ hosts_info(values_manual_deploy)
diff --git a/utilities/harness-generate b/utilities/harness-generate
index ca6b275e..df835871 100644
--- a/utilities/harness-generate
+++ b/utilities/harness-generate
@@ -3,29 +3,31 @@
import glob
import os
import shutil
+import sys
+import logging
-
-from cloudharness_utilities.openapi import LIB_NAME, generate_client, generate_server
+from cloudharness_utilities.openapi import LIB_NAME, generate_python_client, generate_server, get_dependencies
HERE = os.path.dirname(os.path.realpath(__file__))
ROOT = os.path.dirname(HERE)
-# MODULES = [ 'operations']
-OPENAPI_FILES = [path for path in glob.glob(ROOT + '/applications/*/api/*.yaml')]
-MODULES = [os.path.basename(os.path.dirname(os.path.dirname(path))) for path in OPENAPI_FILES]
-CLIENT_SRC_PATH = os.path.join(ROOT, 'client', LIB_NAME)
+def get_openapi_file_paths(root_path):
+ return [path for path in glob.glob(root_path + '/applications/*/api/*.yaml')]
+def get_modules(openapi_files):
+ return [os.path.basename(os.path.dirname(os.path.dirname(path))) for path in openapi_files]
-def generate_servers():
+def generate_servers(root_path):
"""
Generates server stubs
"""
-
- for i in range(len(MODULES)):
- openapi_file = OPENAPI_FILES[i]
+ openapi_files = get_openapi_file_paths(root_path)
+ modules = get_modules(openapi_files)
+ for i in range(len(modules)):
+ openapi_file = openapi_files[i]
application_root = os.path.dirname(os.path.dirname(openapi_file))
generate_server(application_root)
@@ -37,6 +39,7 @@ def copymergedir(root_src_dir, root_dst_dir):
:param root_dst_dir:
:return:
"""
+ logging.info('Copying directory %s to %s', root_src_dir, root_dst_dir)
for src_dir, dirs, files in os.walk(root_src_dir):
dst_dir = src_dir.replace(root_src_dir, root_dst_dir, 1)
if not os.path.exists(dst_dir):
@@ -46,14 +49,18 @@ def copymergedir(root_src_dir, root_dst_dir):
dst_file = os.path.join(dst_dir, file_)
if os.path.exists(dst_file):
os.remove(dst_file)
- shutil.copy(src_file, dst_dir)
+ try:
+ shutil.copy(src_file, dst_dir)
+ except:
+ logging.warning("Error copying file %s to %s.", src_file, dst_dir)
+
-def aggregate_packages():
- DOCS_PATH = os.path.join(CLIENT_SRC_PATH, 'docs')
- TEST_PATH = os.path.join(CLIENT_SRC_PATH,'test')
- README = os.path.join(CLIENT_SRC_PATH, 'README.md')
- REQUIREMENTS = os.path.join(CLIENT_SRC_PATH, 'requirements.txt')
- TEST_REQUIREMENTS = os.path.join(CLIENT_SRC_PATH, 'test-requirements.txt' )
+def aggregate_packages(client_src_path, lib_name=LIB_NAME):
+ DOCS_PATH = os.path.join(client_src_path, 'docs')
+ TEST_PATH = os.path.join(client_src_path, 'test')
+ README = os.path.join(client_src_path, 'README.md')
+ REQUIREMENTS = os.path.join(client_src_path, 'requirements.txt')
+ TEST_REQUIREMENTS = os.path.join(client_src_path, 'test-requirements.txt')
if not os.path.exists(DOCS_PATH):
os.mkdir(DOCS_PATH)
@@ -69,30 +76,30 @@ def aggregate_packages():
req_lines_seen = set()
test_req_lines_seen = set()
- for MODULE_TMP_PATH in glob.glob(CLIENT_SRC_PATH + '/tmp-*'):
- module = MODULE_TMP_PATH.split(f'{LIB_NAME}/tmp-')[-1]
+ for MODULE_TMP_PATH in glob.glob(client_src_path + '/tmp-*'):
+ module = MODULE_TMP_PATH.split(f'{lib_name}/tmp-')[-1].replace('-', '_')
# Moves package
- code_dest_dir = os.path.join(CLIENT_SRC_PATH, LIB_NAME, module)
- copymergedir(os.path.join(MODULE_TMP_PATH, LIB_NAME, module), code_dest_dir)
- copymergedir(f"{MODULE_TMP_PATH}/{LIB_NAME}.{module}", code_dest_dir) #Fixes a a bug with nested packages
+ code_dest_dir = os.path.join(client_src_path, lib_name, module)
+ copymergedir(os.path.join(MODULE_TMP_PATH, lib_name, module), code_dest_dir)
+ copymergedir(f"{MODULE_TMP_PATH}/{lib_name}.{module}", code_dest_dir) # Fixes a a bug with nested packages
# Adds Docs
module_doc_path = os.path.join(DOCS_PATH, module)
if not os.path.exists(module_doc_path):
-
os.mkdir(module_doc_path)
- copymergedir(f"{CLIENT_SRC_PATH}/tmp-{module}/docs", module_doc_path)
+ copymergedir(f"{client_src_path}/tmp-{module}/docs", module_doc_path)
# Adds Tests
- module_test_path = os.path.join(CLIENT_SRC_PATH, 'test', module)
+ module_test_path = os.path.join(client_src_path, 'test', module)
copymergedir(os.path.join(MODULE_TMP_PATH, 'test'), module_test_path)
-
-
# Merges Readme
readme_file = f"{MODULE_TMP_PATH}/README.md"
+ if not os.path.exists(readme_file):
+ logging.warning("Readme file not found: %s.", readme_file)
+ continue
with open(README, 'a+') as outfile:
with open(readme_file) as infile:
filedata = infile.read()
@@ -124,21 +131,41 @@ def aggregate_packages():
shutil.rmtree(MODULE_TMP_PATH)
-def generate_clients():
+def generate_clients(root_path, client_lib_name=LIB_NAME):
"""
Generates client stubs
"""
- for i in range(len(MODULES)):
- module = MODULES[i]
- openapi_file = OPENAPI_FILES[i]
- generate_client(module, openapi_file, CLIENT_SRC_PATH)
-
- aggregate_packages()
-
-
+ openapi_files = get_openapi_file_paths(root_path)
+ modules = get_modules(openapi_files)
+ client_src_path = os.path.join(root_path, 'client', client_lib_name)
+ for i in range(len(modules)):
+ module = modules[i]
+ openapi_file = openapi_files[i]
+ generate_python_client(module, openapi_file, client_src_path, lib_name=client_lib_name)
+ aggregate_packages(client_src_path, client_lib_name)
if __name__ == "__main__":
- generate_servers()
- generate_clients()
+
+ import argparse
+
+ parser = argparse.ArgumentParser(
+        description='Generate server stubs and client libraries for the CloudHarness applications.')
+ parser.add_argument('path', metavar='path', default=ROOT, type=str,
+ help='Base path of the application.')
+    parser.add_argument('-nc', '--no-client', dest='client', action="store_false",
+                        help='Skip client code generation')
+    parser.add_argument('-ns', '--no-server', dest='server', action="store_false",
+                        help='Skip server stub generation')
+    parser.add_argument('-cn', '--client-name', dest='client_name', action="store", default=LIB_NAME,
+                        help='Specify the name of the generated client library (default `cloudharness_cli`)')
+ args, unknown = parser.parse_known_args(sys.argv[1:])
+
+ root_path = os.path.join(os.getcwd(), args.path) if not os.path.isabs(args.path) else args.path
+
+ get_dependencies()
+ if args.server:
+ generate_servers(root_path)
+ if args.client:
+ generate_clients(root_path, args.client_name)
diff --git a/utilities/setup.py b/utilities/setup.py
index c4a70ed6..d8d78e06 100644
--- a/utilities/setup.py
+++ b/utilities/setup.py
@@ -12,7 +12,7 @@
NAME = "cloudharness-utilities"
-VERSION = "0.1.0"
+VERSION = "0.2.0"
# To install the library, run the following
#
# python setup.py install
diff --git a/utilities/tests/test_utils.py b/utilities/tests/test_utils.py
index 8b1526c7..f9914d9c 100644
--- a/utilities/tests/test_utils.py
+++ b/utilities/tests/test_utils.py
@@ -6,11 +6,11 @@
HERE = os.path.dirname(os.path.realpath(__file__)).replace(os.path.sep, '/')
def test_image_name_from_docker_path():
- assert image_name_from_docker_path("a") == 'a'
- assert image_name_from_docker_path("a/b") == 'a-b'
- assert image_name_from_docker_path("a/src/b") == 'a-b'
- assert image_name_from_docker_path("a/tasks/b") == 'a-b'
- assert image_name_from_docker_path("cloudharness/a/b") == 'a-b'
+ assert app_name_from_path("a") == 'a'
+ assert app_name_from_path("a/b") == 'a-b'
+ assert app_name_from_path("a/src/b") == 'a-b'
+ assert app_name_from_path("a/tasks/b") == 'a-b'
+ assert app_name_from_path("cloudharness/a/b") == 'a-b'
def test_merge_configuration_directories():