From d8820d7ce751b7047d419b3f375d57223e1f8160 Mon Sep 17 00:00:00 2001 From: kuanfandevops Date: Wed, 31 Jul 2024 19:10:00 +0000 Subject: [PATCH] Prettified Code! --- .github/ISSUE_TEMPLATE/bug.md | 6 +- .github/ISSUE_TEMPLATE/spike.md | 15 +- .github/ISSUE_TEMPLATE/task.md | 7 +- .github/ISSUE_TEMPLATE/user-story.md | 7 +- .pipeline/build-metabase.js | 8 +- .pipeline/build-patroni.js | 8 +- .pipeline/build.js | 8 +- .pipeline/clean-tools.js | 8 +- .pipeline/clean.js | 8 +- .pipeline/deploy-knp.js | 8 +- .pipeline/deploy-metabase.js | 8 +- .pipeline/deploy-patroni.js | 8 +- .pipeline/deploy-unittest.js | 8 +- .pipeline/deploy.js | 8 +- .pipeline/lib/build-metabase.js | 17 +- .pipeline/lib/build-patroni.js | 17 +- .pipeline/lib/build.js | 56 ++-- .pipeline/lib/clean-tools.js | 14 +- .pipeline/lib/clean.js | 77 +++--- .pipeline/lib/config.js | 259 ++++++++++++++---- .pipeline/lib/deploy-knp.js | 47 ++-- .pipeline/lib/deploy-metabase.js | 55 ++-- .pipeline/lib/deploy-patroni.js | 72 +++-- .pipeline/lib/deploy-unittest.js | 105 ++++--- .pipeline/lib/deploy.js | 102 ++++--- .pipeline/lib/keycloak.js | 234 ++++++++-------- README.md | 71 ++--- charts/cthub-spilo/Readme.md | 130 +++++---- charts/spilo/docs/restore.md | 23 +- django/README.md | 24 +- frontend/src/uploads/UploadContainer.js | 124 ++++----- .../uploads/components/UploadIssuesDetail.js | 4 +- frontend/src/users/UsersContainer.js | 2 +- frontend/src/users/components/UsersPage.js | 2 +- openshift/README.md | 12 +- openshift/templates/backend/README.md | 1 + .../openshift/README.md | 69 ++--- .../backup-container-2.6.1/cronjob.md | 5 +- openshift/templates/crunchydb/readme.md | 19 +- openshift/templates/keycloak/README.md | 6 +- .../templates/metabase-postgresql/README.md | 18 +- openshift/templates/metabase/README.md | 9 +- openshift/templates/minio/README.md | 8 +- openshift/templates/patroni-2.1.1/README.md | 16 +- openshift/templates/redis/readme.md | 8 +- openshift/templates/superset/readme.md | 10 +- superset/README.md | 4 +- 47 files changed, 1031 insertions(+), 704 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md index afa680ef..7bc2d2d6 100644 --- a/.github/ISSUE_TEMPLATE/bug.md +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -2,9 +2,8 @@ name: Bug about: An undesirable behaviour that needs correction title: Bug -labels: '' -assignees: '' - +labels: "" +assignees: "" --- **Describe the Bug** @@ -22,6 +21,7 @@ A clear and concise description of any implications. **Steps To Reproduce** Steps to reproduce the behaviour: User/Role: + 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' diff --git a/.github/ISSUE_TEMPLATE/spike.md b/.github/ISSUE_TEMPLATE/spike.md index f5917b39..9665cd41 100644 --- a/.github/ISSUE_TEMPLATE/spike.md +++ b/.github/ISSUE_TEMPLATE/spike.md @@ -2,25 +2,28 @@ name: Spike about: Research options prior to development work title: Spike -labels: '' -assignees: '' - +labels: "" +assignees: "" --- -**Problem Description** +**Problem Description** **In order to** (achieve some goal), (a system or persona) **needs to** (some action). **Solution Needs** + - Enter the non-negotiables of the solution (what are the needs vs. what are the wants) **Timebox** + - How much effort are we committing to this research? **Outcome** -Details describing the outcome of the research -- Was it successful? What direction should the work go? +Details describing the outcome of the research + +- Was it successful? What direction should the work go? - Was it unsuccessful? 
Discuss next steps with team **Additional Context** + - enter text here - enter text here diff --git a/.github/ISSUE_TEMPLATE/task.md b/.github/ISSUE_TEMPLATE/task.md index e0f7fcd1..2a9a9adb 100644 --- a/.github/ISSUE_TEMPLATE/task.md +++ b/.github/ISSUE_TEMPLATE/task.md @@ -2,9 +2,8 @@ name: Task about: Any work that does not directly impact the user title: Task -labels: '' -assignees: '' - +labels: "" +assignees: "" --- **Describe the task** @@ -14,10 +13,12 @@ A clear and concise description of what the task is. The reason why this task is needed and/or what value it adds. **Acceptance Criteria** + - [ ] first - [ ] second - [ ] third **Additional context** + - Add any other context about the task here. - Or here diff --git a/.github/ISSUE_TEMPLATE/user-story.md b/.github/ISSUE_TEMPLATE/user-story.md index 91938c94..0f4dfcf3 100644 --- a/.github/ISSUE_TEMPLATE/user-story.md +++ b/.github/ISSUE_TEMPLATE/user-story.md @@ -2,14 +2,14 @@ name: User Story about: This template is to be used when describing a feature from the user's perspective title: User Story -labels: '' -assignees: '' - +labels: "" +assignees: "" --- **Title:** **Description:** + - [ ] "As a [type of user]" "I want [an action or feature]" "So that [benefit or value]" - **Wireframe:** @@ -22,6 +22,7 @@ assignees: '' - [ ] Given I am a , When I am , then **Development Checklist:** + - [ ] A - [ ] B - [ ] C diff --git a/.pipeline/build-metabase.js b/.pipeline/build-metabase.js index 35935c29..f56e6412 100755 --- a/.pipeline/build-metabase.js +++ b/.pipeline/build-metabase.js @@ -1,5 +1,5 @@ -'use strict'; -const task = require('./lib/build-metabase.js') -const settings = require('./lib/config.js') +"use strict"; +const task = require("./lib/build-metabase.js"); +const settings = require("./lib/config.js"); -task(Object.assign(settings, { phase: 'build'})) +task(Object.assign(settings, { phase: "build" })); diff --git a/.pipeline/build-patroni.js b/.pipeline/build-patroni.js index f9dbb87d..993e6e72 100755 --- a/.pipeline/build-patroni.js +++ b/.pipeline/build-patroni.js @@ -1,5 +1,5 @@ -'use strict'; -const task = require('./lib/build-patroni.js') -const settings = require('./lib/config.js') +"use strict"; +const task = require("./lib/build-patroni.js"); +const settings = require("./lib/config.js"); -task(Object.assign(settings, { phase: 'build'})) +task(Object.assign(settings, { phase: "build" })); diff --git a/.pipeline/build.js b/.pipeline/build.js index 3ac899f8..e73ecef3 100755 --- a/.pipeline/build.js +++ b/.pipeline/build.js @@ -1,5 +1,5 @@ -'use strict'; -const task = require('./lib/build.js') -const settings = require('./lib/config.js') +"use strict"; +const task = require("./lib/build.js"); +const settings = require("./lib/config.js"); -task(Object.assign(settings, { phase: 'build'})) +task(Object.assign(settings, { phase: "build" })); diff --git a/.pipeline/clean-tools.js b/.pipeline/clean-tools.js index 42f4c43e..7d120617 100755 --- a/.pipeline/clean-tools.js +++ b/.pipeline/clean-tools.js @@ -1,5 +1,5 @@ -'use strict'; -const settings = require('./lib/config.js') -const task = require('./lib/clean-tools.js') +"use strict"; +const settings = require("./lib/config.js"); +const task = require("./lib/clean-tools.js"); -task(Object.assign(settings, { phase: settings.options.env})); +task(Object.assign(settings, { phase: settings.options.env })); diff --git a/.pipeline/clean.js b/.pipeline/clean.js index 42231d7f..c4fa6f72 100755 --- a/.pipeline/clean.js +++ b/.pipeline/clean.js @@ -1,5 +1,5 @@ -'use strict'; -const 
settings = require('./lib/config.js') -const task = require('./lib/clean.js') +"use strict"; +const settings = require("./lib/config.js"); +const task = require("./lib/clean.js"); -task(Object.assign(settings, { phase: settings.options.env})); +task(Object.assign(settings, { phase: settings.options.env })); diff --git a/.pipeline/deploy-knp.js b/.pipeline/deploy-knp.js index ccbc4048..f425f68f 100755 --- a/.pipeline/deploy-knp.js +++ b/.pipeline/deploy-knp.js @@ -1,5 +1,5 @@ -'use strict'; -const settings = require('./lib/config.js') -const task = require('./lib/deploy-knp.js') +"use strict"; +const settings = require("./lib/config.js"); +const task = require("./lib/deploy-knp.js"); -task(Object.assign(settings, { phase: settings.options.env})); +task(Object.assign(settings, { phase: settings.options.env })); diff --git a/.pipeline/deploy-metabase.js b/.pipeline/deploy-metabase.js index 02d551f0..e169e82f 100755 --- a/.pipeline/deploy-metabase.js +++ b/.pipeline/deploy-metabase.js @@ -1,5 +1,5 @@ -'use strict'; -const settings = require('./lib/config.js') -const task = require('./lib/deploy-metabase.js') +"use strict"; +const settings = require("./lib/config.js"); +const task = require("./lib/deploy-metabase.js"); -task(Object.assign(settings, { phase: settings.options.env})); +task(Object.assign(settings, { phase: settings.options.env })); diff --git a/.pipeline/deploy-patroni.js b/.pipeline/deploy-patroni.js index 348313cd..df570f23 100755 --- a/.pipeline/deploy-patroni.js +++ b/.pipeline/deploy-patroni.js @@ -1,5 +1,5 @@ -'use strict'; -const settings = require('./lib/config.js') -const task = require('./lib/deploy-patroni.js') +"use strict"; +const settings = require("./lib/config.js"); +const task = require("./lib/deploy-patroni.js"); -task(Object.assign(settings, { phase: settings.options.env})); +task(Object.assign(settings, { phase: settings.options.env })); diff --git a/.pipeline/deploy-unittest.js b/.pipeline/deploy-unittest.js index c6c95ded..c59c63bf 100644 --- a/.pipeline/deploy-unittest.js +++ b/.pipeline/deploy-unittest.js @@ -1,5 +1,5 @@ -'use strict'; -const settings = require('./lib/config.js') -const task = require('./lib/deploy-unittest.js') +"use strict"; +const settings = require("./lib/config.js"); +const task = require("./lib/deploy-unittest.js"); -task(Object.assign(settings, { phase: settings.options.env})); +task(Object.assign(settings, { phase: settings.options.env })); diff --git a/.pipeline/deploy.js b/.pipeline/deploy.js index 59550945..d0eea0ee 100755 --- a/.pipeline/deploy.js +++ b/.pipeline/deploy.js @@ -1,5 +1,5 @@ -'use strict'; -const settings = require('./lib/config.js') -const task = require('./lib/deploy.js') +"use strict"; +const settings = require("./lib/config.js"); +const task = require("./lib/deploy.js"); -task(Object.assign(settings, { phase: settings.options.env})); +task(Object.assign(settings, { phase: settings.options.env })); diff --git a/.pipeline/lib/build-metabase.js b/.pipeline/lib/build-metabase.js index 6ac832d9..0b03c67d 100755 --- a/.pipeline/lib/build-metabase.js +++ b/.pipeline/lib/build-metabase.js @@ -2,16 +2,25 @@ const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); const path = require("path"); -module.exports = settings => { +module.exports = (settings) => { const phases = settings.phases; const options = settings.options; - const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options)); + const oc = new OpenShiftClientX( + Object.assign({ namespace: phases.build.namespace }, options), 
+ ); const phase = "build"; let objects = []; - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift")); + const templatesLocalBaseUrl = oc.toFileUrl( + path.resolve(__dirname, "../../openshift"), + ); // The building of your cool app goes here ▼▼▼ - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/metabase/metabase-bc.yaml`, {})); + objects = objects.concat( + oc.processDeploymentTemplate( + `${templatesLocalBaseUrl}/templates/metabase/metabase-bc.yaml`, + {}, + ), + ); oc.applyRecommendedLabels( objects, diff --git a/.pipeline/lib/build-patroni.js b/.pipeline/lib/build-patroni.js index 5ea254bf..052dd1ba 100755 --- a/.pipeline/lib/build-patroni.js +++ b/.pipeline/lib/build-patroni.js @@ -2,16 +2,25 @@ const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); const path = require("path"); -module.exports = settings => { +module.exports = (settings) => { const phases = settings.phases; const options = settings.options; - const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options)); + const oc = new OpenShiftClientX( + Object.assign({ namespace: phases.build.namespace }, options), + ); const phase = "build"; let objects = []; - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift")); + const templatesLocalBaseUrl = oc.toFileUrl( + path.resolve(__dirname, "../../openshift"), + ); // The building of your cool app goes here ▼▼▼ - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/patroni-2.1.1/templates/build.yaml`, {})); + objects = objects.concat( + oc.processDeploymentTemplate( + `${templatesLocalBaseUrl}/templates/patroni-2.1.1/templates/build.yaml`, + {}, + ), + ); oc.applyRecommendedLabels( objects, diff --git a/.pipeline/lib/build.js b/.pipeline/lib/build.js index c9fe35d1..ae7ea643 100755 --- a/.pipeline/lib/build.js +++ b/.pipeline/lib/build.js @@ -2,37 +2,51 @@ const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); const path = require("path"); -module.exports = settings => { +module.exports = (settings) => { const phases = settings.phases; const options = settings.options; - const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options)); + const oc = new OpenShiftClientX( + Object.assign({ namespace: phases.build.namespace }, options), + ); const phase = "build"; let objects = []; - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift")); + const templatesLocalBaseUrl = oc.toFileUrl( + path.resolve(__dirname, "../../openshift"), + ); // The building of your cool app goes here ▼▼▼ // build frontend - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/frontend/frontend-bc.yaml`, { - 'param':{ - 'NAME': phases[phase].name, - 'SUFFIX': phases[phase].suffix, - 'VERSION': phases[phase].tag, - 'GIT_URL': oc.git.http_url, - 'GIT_REF': oc.git.ref - } - })) + objects = objects.concat( + oc.processDeploymentTemplate( + `${templatesLocalBaseUrl}/templates/frontend/frontend-bc.yaml`, + { + param: { + NAME: phases[phase].name, + SUFFIX: phases[phase].suffix, + VERSION: phases[phase].tag, + GIT_URL: oc.git.http_url, + GIT_REF: oc.git.ref, + }, + }, + ), + ); //build backend - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/backend/backend-bc.yaml`, { - 'param':{ - 'NAME': phases[phase].name, - 'SUFFIX': phases[phase].suffix, - 'VERSION': phases[phase].tag, - 'GIT_URL': 
oc.git.http_url,
-            'GIT_REF': oc.git.ref
-        }
-    }))
+  objects = objects.concat(
+    oc.processDeploymentTemplate(
+      `${templatesLocalBaseUrl}/templates/backend/backend-bc.yaml`,
+      {
+        param: {
+          NAME: phases[phase].name,
+          SUFFIX: phases[phase].suffix,
+          VERSION: phases[phase].tag,
+          GIT_URL: oc.git.http_url,
+          GIT_REF: oc.git.ref,
+        },
+      },
+    ),
+  );
 
   oc.applyRecommendedLabels(
     objects,
diff --git a/.pipeline/lib/clean-tools.js b/.pipeline/lib/clean-tools.js
index 0521bd6f..1b953d55 100755
--- a/.pipeline/lib/clean-tools.js
+++ b/.pipeline/lib/clean-tools.js
@@ -27,15 +27,16 @@ const getTargetPhases = (env, phases) => {
   return target_phase;
 };
 
-module.exports = settings => {
+module.exports = (settings) => {
   const phases = settings.phases;
   const options = settings.options;
-  const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options));
+  const oc = new OpenShiftClientX(
+    Object.assign({ namespace: phases.build.namespace }, options),
+  );
   const target_phases = getTargetPhases(options.env, phases);
 
-  target_phases.forEach(k => {
+  target_phases.forEach((k) => {
     if (phases.hasOwnProperty(k)) {
-
       const phase = phases[k];
       oc.namespace(phase.namespace);
@@ -44,7 +45,7 @@ module.exports = settings => {
         namespace: phase.namespace,
       });
 
-      buildConfigs.forEach(bc => {
+      buildConfigs.forEach((bc) => {
         if (bc.spec.output.to.kind == "ImageStreamTag") {
           oc.delete([`ImageStreamTag/${bc.spec.output.to.name}`], {
             "ignore-not-found": "true",
@@ -56,9 +57,8 @@ module.exports = settings => {
           "ignore-not-found": "true",
           wait: "true",
           namespace: phase.namespace,
-          });
+        });
       });
-
     }
   });
 };
diff --git a/.pipeline/lib/clean.js b/.pipeline/lib/clean.js
index 7ec61c13..fdfa3d7f 100755
--- a/.pipeline/lib/clean.js
+++ b/.pipeline/lib/clean.js
@@ -27,17 +27,17 @@ const getTargetPhases = (env, phases) => {
   return target_phase;
 };
 
-module.exports = settings => {
+module.exports = (settings) => {
   const phases = settings.phases;
   const options = settings.options;
-  const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options));
+  const oc = new OpenShiftClientX(
+    Object.assign({ namespace: phases.build.namespace }, options),
+  );
   const target_phases = getTargetPhases(options.env, phases);
 
-  target_phases.forEach(k => {
-
+  target_phases.forEach((k) => {
     //k is dev, test or prod
     if (phases.hasOwnProperty(k)) {
-
       const phase = phases[k];
       oc.namespace(phase.namespace);
@@ -45,17 +45,20 @@ module.exports = settings => {
         selector: `app=${phase.instance},env-id=${phase.changeId},env-name=${k},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`,
         namespace: phase.namespace,
       });
-      deploymentConfigs.forEach(dc => {
-        dc.spec.triggers.forEach(trigger => {
+      deploymentConfigs.forEach((dc) => {
+        dc.spec.triggers.forEach((trigger) => {
           if (
-            trigger.type == "ImageChange" &&
-            trigger.imageChangeParams.from.kind == "ImageStreamTag"
+            trigger.type == "ImageChange" &&
+            trigger.imageChangeParams.from.kind == "ImageStreamTag"
           ) {
-            oc.delete([`ImageStreamTag/${trigger.imageChangeParams.from.name}`], {
-              "ignore-not-found": "true",
-              wait: "true",
-              namespace: phase.namespace,
-            });
+            oc.delete(
+              [`ImageStreamTag/${trigger.imageChangeParams.from.name}`],
+              {
+                "ignore-not-found": "true",
+                wait: "true",
+                namespace: phase.namespace,
+              },
+            );
           }
         });
         oc.delete([`DeploymentConfig/${dc.metadata.name}`], {
@@ -66,44 +69,42 @@ module.exports = settings => {
       });
       oc.raw(
         "delete",
["Secret,configmap,endpoints,RoleBinding,role,ServiceAccount,Endpoints,service,route"], + [ + "Secret,configmap,endpoints,RoleBinding,role,ServiceAccount,Endpoints,service,route", + ], { selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, wait: "true", namespace: phase.namespace, - } + }, ); //get all statefulsets before they are deleted const statefulsets = oc.get("statefulset", { selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, namespace: phase.namespace, - }); + }); //remove all the PVCs associated with each statefulset, after they get deleted by above delete all operation - statefulsets.forEach(statefulset => { + statefulsets.forEach((statefulset) => { //delete StatefulSet oc.delete([`StatefulSet/${statefulset.metadata.name}`], { "ignore-not-found": "true", wait: "true", namespace: phase.namespace, - }); + }); //delete configmaps create by patroni let patroniConfigmaps = oc.get("configmap", { selector: `app.kubernetes.io/name=patroni,cluster-name=${statefulset.metadata.name}`, namespace: phase.namespace, }); - if(Object.entries(patroniConfigmaps).length > 0) { - oc.raw( - "delete", - ["configmap"], - { - selector: `app.kubernetes.io/name=patroni,cluster-name=${statefulset.metadata.name}`, - wait: "true", - "ignore-not-found": "true", - namespace: phase.namespace, - }, - ); - }; + if (Object.entries(patroniConfigmaps).length > 0) { + oc.raw("delete", ["configmap"], { + selector: `app.kubernetes.io/name=patroni,cluster-name=${statefulset.metadata.name}`, + wait: "true", + "ignore-not-found": "true", + namespace: phase.namespace, + }); + } //delete PVCs mounted for statfulset oc.raw("delete", ["pvc"], { selector: `app=${phase.instance},statefulset=${statefulset.metadata.name},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, @@ -111,22 +112,20 @@ module.exports = settings => { wait: "true", namespace: phase.namespace, }); - }); //remove all PR's network policies const knps = oc.get("networkpolicies", { selector: `app=${phase.instance},env-id=${phase.changeId},env-name=${k},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, namespace: phase.namespace, - }); - knps.forEach(knp => { + }); + knps.forEach((knp) => { oc.delete([`networkpolicy/${knp.metadata.name}`], { - "ignore-not-found": "true", - wait: "true", - namespace: phase.namespace, - }); + "ignore-not-found": "true", + wait: "true", + namespace: phase.namespace, + }); }); - } }); }; diff --git a/.pipeline/lib/config.js b/.pipeline/lib/config.js index b03f1886..f02bae89 100644 --- a/.pipeline/lib/config.js +++ b/.pipeline/lib/config.js @@ -1,9 +1,9 @@ -'use strict'; -const options= require('@bcgov/pipeline-cli').Util.parseArguments() -const changeId = options.pr //aka pull-request -const version = '0.2.0' -const name = 'cthub' -const ocpName = 'apps.silver.devops' +"use strict"; +const options = require("@bcgov/pipeline-cli").Util.parseArguments(); +const changeId = options.pr; //aka pull-request +const version = "0.2.0"; +const name = "cthub"; +const ocpName = "apps.silver.devops"; //if work directly on bcgov repo, the value is bcgov //if work on forked developer repo, the value is the developer's GitHub Id @@ -26,57 +26,220 @@ Set the cpu usage 20m as the lowest Set the limit as two times of request */ -options.git.owner='bcgov' -options.git.repository='cthub' +options.git.owner = "bcgov"; +options.git.repository = "cthub"; const 
phases = { + build: { + namespace: "30b186-tools", + transient: true, + name: `${name}`, + phase: "build", + changeId: `${changeId}`, + suffix: `-build-${changeId}`, + instance: `${name}-build-${changeId}`, + version: `${version}-${changeId}`, + tag: `build-${version}-${changeId}`, + ocpName: `${ocpName}`, + }, - build: {namespace:'30b186-tools' , transient:true, name: `${name}`, phase: 'build', - changeId:`${changeId}`, suffix: `-build-${changeId}` , instance: `${name}-build-${changeId}`, - version:`${version}-${changeId}`, tag:`build-${version}-${changeId}`, ocpName: `${ocpName}`}, + dev: { + namespace: "30b186-dev", + transient: true, + name: `${name}`, + ssoSuffix: "-dev", + ssoName: "dev.oidc.gov.bc.ca", + phase: "dev", + changeId: `${changeId}`, + suffix: "-dev", + instance: `${name}-dev`, + version: `${version}`, + tag: `dev-${version}`, + host: `cthub-dev.${ocpName}.gov.bc.ca`, + djangoDebug: "True", + logoutHostName: "logontest7.gov.bc.ca", + dbHost: "cthub-crunchy-dev-pgbouncer", + metabaseCpuRequest: "200m", + metabaseCpuLimit: "300m", + metabaseMemoryRequest: "500Mi", + metabaseMemoryLimit: "2Gi", + metabaseReplicas: 1, + frontendCpuRequest: "400m", + frontendCpuLimit: "800m", + frontendMemoryRequest: "600Mi", + frontendMemoryLimit: "1200Mi", + frontendReplicas: 1, + backendCpuRequest: "50m", + backendCpuLimit: "100m", + backendMemoryRequest: "520Mi", + backendMemoryLimit: "1Gi", + backendHealthCheckDelay: 30, + backendHost: `cthub-backend-dev.${ocpName}.gov.bc.ca`, + backendReplicas: 1, + minioCpuRequest: "30m", + minioCpuLimit: "100m", + minioMemoryRequest: "150Mi", + minioMemoryLimit: "300Mi", + minioPvcSize: "3Gi", + minioBucketName: "zevadv", + schemaspyCpuRequest: "50m", + schemaspyCpuLimit: "200m", + schemaspyMemoryRequest: "150M", + schemaspyMemoryLimit: "300M", + schemaspyHealthCheckDelay: 160, + rabbitmqCpuRequest: "250m", + rabbitmqCpuLimit: "700m", + rabbitmqMemoryRequest: "500M", + rabbitmqMemoryLimit: "1G", + rabbitmqPvcSize: "1G", + rabbitmqReplica: 1, + rabbitmqPostStartSleep: 120, + storageClass: "netapp-block-standard", + patroniCpuRequest: "200m", + patroniCpuLimit: "400m", + patroniMemoryRequest: "250Mi", + patroniMemoryLimit: "500Mi", + patroniPvcSize: "2G", + patroniReplica: 2, + storageClass: "netapp-block-standard", + ocpName: `${ocpName}`, + }, - dev: {namespace:'30b186-dev', transient:true, name: `${name}`, ssoSuffix:'-dev', - ssoName:'dev.oidc.gov.bc.ca', phase: 'dev' , changeId:`${changeId}`, suffix: '-dev', - instance: `${name}-dev` , version:`${version}`, tag:`dev-${version}`, - host: `cthub-dev.${ocpName}.gov.bc.ca`, djangoDebug: 'True', logoutHostName: 'logontest7.gov.bc.ca', dbHost: 'cthub-crunchy-dev-pgbouncer', - metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, - frontendCpuRequest: '400m', frontendCpuLimit: '800m', frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, - backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendHost: `cthub-backend-dev.${ocpName}.gov.bc.ca`, backendReplicas: 1, - minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3Gi', minioBucketName: 'zevadv', - schemaspyCpuRequest: '50m', schemaspyCpuLimit: '200m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160, - rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: 
'700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '1G', rabbitmqPvcSize: '1G', rabbitmqReplica: 1, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard', - patroniCpuRequest: '200m', patroniCpuLimit: '400m', patroniMemoryRequest: '250Mi', patroniMemoryLimit: '500Mi', patroniPvcSize: '2G', patroniReplica: 2, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`}, - - test: {namespace:'30b186-test', name: `${name}`, ssoSuffix:'-test', - ssoName:'test.oidc.gov.bc.ca', phase: 'test' , changeId:`${changeId}`, suffix: `-test`, - instance: `${name}-test`, version:`${version}`, tag:`test-${version}`, - host: `cthub-test.${ocpName}.gov.bc.ca`, djangoDebug: 'False', logoutHostName: 'logontest7.gov.bc.ca', dbHost: 'cthub-crunchy-test-pgbouncer', - metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, - frontendCpuRequest: '400m', frontendCpuLimit: '800m', frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, frontendMinReplicas: 1, frontendMaxReplicas: 3, - backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendReplicas: 1, backendMinReplicas: 1, backendMaxReplicas: 3, backendHost: `cthub-backend-test.${ocpName}.gov.bc.ca`, - minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3G', minioBucketName: 'zevats', - schemaspyCpuRequest: '20m', schemaspyCpuLimit: '200m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160, - rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '700M', rabbitmqPvcSize: '1G', rabbitmqReplica: 2, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard', - patroniCpuRequest: '200m', patroniCpuLimit: '400m', patroniMemoryRequest: '250Mi', patroniMemoryLimit: '500Mi', patroniPvcSize: '5G', patroniReplica: 2, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`}, - - prod: {namespace:'30b186-prod', name: `${name}`, ssoSuffix:'', - ssoName:'oidc.gov.bc.ca', phase: 'prod' , changeId:`${changeId}`, suffix: `-prod`, - instance: `${name}-prod`, version:`${version}`, tag:`prod-${version}`, - metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, - host: `cthub-prod.${ocpName}.gov.bc.ca`, djangoDebug: 'False', logoutHostName: 'logon7.gov.bc.ca', dbHost: 'patroni-master-prod', - frontendCpuRequest: '400m', frontendCpuLimit: '800m', frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, frontendMinReplicas: 1, frontendMaxReplicas: 3, - backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendReplicas: 1, backendMinReplicas: 1, backendMaxReplicas: 3, backendHost: `cthub-backend-prod.${ocpName}.gov.bc.ca`, - minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3G', minioBucketName: 'zevapr', - schemaspyCpuRequest: '50m', schemaspyCpuLimit: '400m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160, - rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '1G', rabbitmqPvcSize: '5G', rabbitmqReplica: 2, rabbitmqPostStartSleep: 120, 
storageClass: 'netapp-block-standard', - patroniCpuRequest: '200m', patroniCpuLimit: '400m', patroniMemoryRequest: '250Mi', patroniMemoryLimit: '500Mi', patroniPvcSize: '8G', patroniReplica: 3, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`} + test: { + namespace: "30b186-test", + name: `${name}`, + ssoSuffix: "-test", + ssoName: "test.oidc.gov.bc.ca", + phase: "test", + changeId: `${changeId}`, + suffix: `-test`, + instance: `${name}-test`, + version: `${version}`, + tag: `test-${version}`, + host: `cthub-test.${ocpName}.gov.bc.ca`, + djangoDebug: "False", + logoutHostName: "logontest7.gov.bc.ca", + dbHost: "cthub-crunchy-test-pgbouncer", + metabaseCpuRequest: "200m", + metabaseCpuLimit: "300m", + metabaseMemoryRequest: "500Mi", + metabaseMemoryLimit: "2Gi", + metabaseReplicas: 1, + frontendCpuRequest: "400m", + frontendCpuLimit: "800m", + frontendMemoryRequest: "600Mi", + frontendMemoryLimit: "1200Mi", + frontendReplicas: 1, + frontendMinReplicas: 1, + frontendMaxReplicas: 3, + backendCpuRequest: "50m", + backendCpuLimit: "100m", + backendMemoryRequest: "520Mi", + backendMemoryLimit: "1Gi", + backendHealthCheckDelay: 30, + backendReplicas: 1, + backendMinReplicas: 1, + backendMaxReplicas: 3, + backendHost: `cthub-backend-test.${ocpName}.gov.bc.ca`, + minioCpuRequest: "30m", + minioCpuLimit: "100m", + minioMemoryRequest: "150Mi", + minioMemoryLimit: "300Mi", + minioPvcSize: "3G", + minioBucketName: "zevats", + schemaspyCpuRequest: "20m", + schemaspyCpuLimit: "200m", + schemaspyMemoryRequest: "150M", + schemaspyMemoryLimit: "300M", + schemaspyHealthCheckDelay: 160, + rabbitmqCpuRequest: "250m", + rabbitmqCpuLimit: "700m", + rabbitmqMemoryRequest: "500M", + rabbitmqMemoryLimit: "700M", + rabbitmqPvcSize: "1G", + rabbitmqReplica: 2, + rabbitmqPostStartSleep: 120, + storageClass: "netapp-block-standard", + patroniCpuRequest: "200m", + patroniCpuLimit: "400m", + patroniMemoryRequest: "250Mi", + patroniMemoryLimit: "500Mi", + patroniPvcSize: "5G", + patroniReplica: 2, + storageClass: "netapp-block-standard", + ocpName: `${ocpName}`, + }, + prod: { + namespace: "30b186-prod", + name: `${name}`, + ssoSuffix: "", + ssoName: "oidc.gov.bc.ca", + phase: "prod", + changeId: `${changeId}`, + suffix: `-prod`, + instance: `${name}-prod`, + version: `${version}`, + tag: `prod-${version}`, + metabaseCpuRequest: "200m", + metabaseCpuLimit: "300m", + metabaseMemoryRequest: "500Mi", + metabaseMemoryLimit: "2Gi", + metabaseReplicas: 1, + host: `cthub-prod.${ocpName}.gov.bc.ca`, + djangoDebug: "False", + logoutHostName: "logon7.gov.bc.ca", + dbHost: "patroni-master-prod", + frontendCpuRequest: "400m", + frontendCpuLimit: "800m", + frontendMemoryRequest: "600Mi", + frontendMemoryLimit: "1200Mi", + frontendReplicas: 1, + frontendMinReplicas: 1, + frontendMaxReplicas: 3, + backendCpuRequest: "50m", + backendCpuLimit: "100m", + backendMemoryRequest: "520Mi", + backendMemoryLimit: "1Gi", + backendHealthCheckDelay: 30, + backendReplicas: 1, + backendMinReplicas: 1, + backendMaxReplicas: 3, + backendHost: `cthub-backend-prod.${ocpName}.gov.bc.ca`, + minioCpuRequest: "30m", + minioCpuLimit: "100m", + minioMemoryRequest: "150Mi", + minioMemoryLimit: "300Mi", + minioPvcSize: "3G", + minioBucketName: "zevapr", + schemaspyCpuRequest: "50m", + schemaspyCpuLimit: "400m", + schemaspyMemoryRequest: "150M", + schemaspyMemoryLimit: "300M", + schemaspyHealthCheckDelay: 160, + rabbitmqCpuRequest: "250m", + rabbitmqCpuLimit: "700m", + rabbitmqMemoryRequest: "500M", + rabbitmqMemoryLimit: "1G", + rabbitmqPvcSize: "5G", 
+ rabbitmqReplica: 2, + rabbitmqPostStartSleep: 120, + storageClass: "netapp-block-standard", + patroniCpuRequest: "200m", + patroniCpuLimit: "400m", + patroniMemoryRequest: "250Mi", + patroniMemoryLimit: "500Mi", + patroniPvcSize: "8G", + patroniReplica: 3, + storageClass: "netapp-block-standard", + ocpName: `${ocpName}`, + }, }; // This callback forces the node process to exit as failure. -process.on('unhandledRejection', (reason) => { +process.on("unhandledRejection", (reason) => { console.log(reason); process.exit(1); }); -module.exports = exports = {phases, options}; +module.exports = exports = { phases, options }; diff --git a/.pipeline/lib/deploy-knp.js b/.pipeline/lib/deploy-knp.js index 6458596c..7fbeef2a 100755 --- a/.pipeline/lib/deploy-knp.js +++ b/.pipeline/lib/deploy-knp.js @@ -2,33 +2,46 @@ const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); const path = require("path"); -module.exports = settings => { +module.exports = (settings) => { const phases = settings.phases; const options = settings.options; const phase = options.env; const changeId = phases[phase].changeId; - const oc = new OpenShiftClientX(Object.assign({namespace: phases[phase].namespace}, options)); + const oc = new OpenShiftClientX( + Object.assign({ namespace: phases[phase].namespace }, options), + ); //add Valid Redirect URIs for the pull request to keycloak - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift")); + const templatesLocalBaseUrl = oc.toFileUrl( + path.resolve(__dirname, "../../openshift"), + ); var objects = []; // The deployment of your cool app goes here ▼▼▼ - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/knp/knp-env-pr.yaml`, { - 'param': { - 'SUFFIX': phases[phase].suffix, - 'ENVIRONMENT': phases[phase].phase - } - })) - + objects = objects.concat( + oc.processDeploymentTemplate( + `${templatesLocalBaseUrl}/templates/knp/knp-env-pr.yaml`, + { + param: { + SUFFIX: phases[phase].suffix, + ENVIRONMENT: phases[phase].phase, + }, + }, + ), + ); + oc.applyRecommendedLabels( - objects, - phases[phase].name, - phase, - `${changeId}`, - phases[phase].instance, + objects, + phases[phase].name, + phase, + `${changeId}`, + phases[phase].instance, + ); + oc.importImageStreams( + objects, + phases[phase].tag, + phases.build.namespace, + phases.build.tag, ); - oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag); oc.applyAndDeploy(objects, phases[phase].instance); - }; diff --git a/.pipeline/lib/deploy-metabase.js b/.pipeline/lib/deploy-metabase.js index 16a5c424..e775b9d7 100755 --- a/.pipeline/lib/deploy-metabase.js +++ b/.pipeline/lib/deploy-metabase.js @@ -3,37 +3,50 @@ const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); const path = require("path"); //const KeyCloakClient = require('./keycloak'); -module.exports = settings => { +module.exports = (settings) => { const phases = settings.phases; const options = settings.options; const phase = options.env; const changeId = phases[phase].changeId; - const oc = new OpenShiftClientX(Object.assign({namespace: phases[phase].namespace}, options)); + const oc = new OpenShiftClientX( + Object.assign({ namespace: phases[phase].namespace }, options), + ); - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift")); + const templatesLocalBaseUrl = oc.toFileUrl( + path.resolve(__dirname, "../../openshift"), + ); var objects = []; // The deployment of your cool app goes here ▼▼▼ - objects = 
objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/metabase-postgresql/metabase-dc.yaml`, { - 'param': { - 'ENV_NAME': phases[phase].phase, - 'SUFFIX': phases[phase].suffix, - 'CPU_REQUEST': phases[phase].metabaseCpuRequest, - 'CPU_LIMIT': phases[phase].metabaseCpuLimit, - 'MEMORY_REQUEST': phases[phase].metabaseMemoryRequest, - 'MEMORY_LIMIT': phases[phase].metabaseMemoryLimit, - 'REPLICAS': phases[phase].metabaseReplicas, - } - })) + objects = objects.concat( + oc.processDeploymentTemplate( + `${templatesLocalBaseUrl}/templates/metabase-postgresql/metabase-dc.yaml`, + { + param: { + ENV_NAME: phases[phase].phase, + SUFFIX: phases[phase].suffix, + CPU_REQUEST: phases[phase].metabaseCpuRequest, + CPU_LIMIT: phases[phase].metabaseCpuLimit, + MEMORY_REQUEST: phases[phase].metabaseMemoryRequest, + MEMORY_LIMIT: phases[phase].metabaseMemoryLimit, + REPLICAS: phases[phase].metabaseReplicas, + }, + }, + ), + ); oc.applyRecommendedLabels( - objects, - phases[phase].name, - phase, - `${changeId}`, - phases[phase].instance, + objects, + phases[phase].name, + phase, + `${changeId}`, + phases[phase].instance, + ); + oc.importImageStreams( + objects, + phases[phase].tag, + phases.build.namespace, + phases.build.tag, ); - oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag); oc.applyAndDeploy(objects, phases[phase].instance); - }; diff --git a/.pipeline/lib/deploy-patroni.js b/.pipeline/lib/deploy-patroni.js index e7eb5f97..42494d70 100755 --- a/.pipeline/lib/deploy-patroni.js +++ b/.pipeline/lib/deploy-patroni.js @@ -3,12 +3,14 @@ const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); const path = require("path"); //const KeyCloakClient = require('./keycloak'); -module.exports = settings => { +module.exports = (settings) => { const phases = settings.phases; const options = settings.options; const phase = options.env; const changeId = phases[phase].changeId; - const oc = new OpenShiftClientX(Object.assign({namespace: phases[phase].namespace}, options)); + const oc = new OpenShiftClientX( + Object.assign({ namespace: phases[phase].namespace }, options), + ); //add Valid Redirect URIs for the pull request to keycloak /************ @@ -18,37 +20,53 @@ module.exports = settings => { } *************/ - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift")); + const templatesLocalBaseUrl = oc.toFileUrl( + path.resolve(__dirname, "../../openshift"), + ); var objects = []; // The deployment of your cool app goes here ▼▼▼ - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/patroni-2.1.1/templates/prerequisite.yaml`, { - 'param': { - 'SUFFIX': phases[phase].suffix - } - })) + objects = objects.concat( + oc.processDeploymentTemplate( + `${templatesLocalBaseUrl}/templates/patroni-2.1.1/templates/prerequisite.yaml`, + { + param: { + SUFFIX: phases[phase].suffix, + }, + }, + ), + ); - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/patroni-2.1.1/templates/deploy.yaml`, { - 'param': { - 'SUFFIX': phases[phase].suffix, - 'CPU_REQUEST': phases[phase].patroniCpuRequest, - 'CPU_LIMIT': phases[phase].patroniCpuLimit, - 'MEMORY_REQUEST': phases[phase].patroniMemoryRequest, - 'MEMORY_LIMIT': phases[phase].patroniMemoryLimit, - 'REPLICAS': phases[phase].patroniReplica, - 'PVC_SIZE': phases[phase].patroniPvcSize, - 'STORAGE_CLASS': phases[phase].storageClass - } - })) + objects = objects.concat( + oc.processDeploymentTemplate( + 
`${templatesLocalBaseUrl}/templates/patroni-2.1.1/templates/deploy.yaml`, + { + param: { + SUFFIX: phases[phase].suffix, + CPU_REQUEST: phases[phase].patroniCpuRequest, + CPU_LIMIT: phases[phase].patroniCpuLimit, + MEMORY_REQUEST: phases[phase].patroniMemoryRequest, + MEMORY_LIMIT: phases[phase].patroniMemoryLimit, + REPLICAS: phases[phase].patroniReplica, + PVC_SIZE: phases[phase].patroniPvcSize, + STORAGE_CLASS: phases[phase].storageClass, + }, + }, + ), + ); oc.applyRecommendedLabels( - objects, - phases[phase].name, - phase, - `${changeId}`, - phases[phase].instance, + objects, + phases[phase].name, + phase, + `${changeId}`, + phases[phase].instance, + ); + oc.importImageStreams( + objects, + phases[phase].tag, + phases.build.namespace, + phases.build.tag, ); - oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag); oc.applyAndDeploy(objects, phases[phase].instance); - }; diff --git a/.pipeline/lib/deploy-unittest.js b/.pipeline/lib/deploy-unittest.js index 93e01e50..a5ec5f5c 100644 --- a/.pipeline/lib/deploy-unittest.js +++ b/.pipeline/lib/deploy-unittest.js @@ -2,66 +2,87 @@ const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); const path = require("path"); -module.exports = settings => { +module.exports = (settings) => { const phases = settings.phases; const options = settings.options; const phase = options.env; const changeId = phases[phase].changeId; - const oc = new OpenShiftClientX(Object.assign({namespace: phases[phase].namespace}, options)); + const oc = new OpenShiftClientX( + Object.assign({ namespace: phases[phase].namespace }, options), + ); - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift")); + const templatesLocalBaseUrl = oc.toFileUrl( + path.resolve(__dirname, "../../openshift"), + ); var objects = []; // The deployment of your cool app goes here ▼▼▼ //deploy separate database and backend pod for unit test - if( phase === 'dev' ) { - + if (phase === "dev") { //create unit test database init scripts - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/unittest/zeva-postgresql-init.yaml`, { - 'param': { - 'NAME': phases[phase].name, - 'SUFFIX': phases[phase].suffix - } - })) + objects = objects.concat( + oc.processDeploymentTemplate( + `${templatesLocalBaseUrl}/templates/unittest/zeva-postgresql-init.yaml`, + { + param: { + NAME: phases[phase].name, + SUFFIX: phases[phase].suffix, + }, + }, + ), + ); //deploy postgresql unit test - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/unittest/postgresql-dc-unittest.yaml`, { - 'param': { - 'NAME': phases[phase].name, - 'SUFFIX': phases[phase].suffix, - 'ENV_NAME': phases[phase].phase - } - })) + objects = objects.concat( + oc.processDeploymentTemplate( + `${templatesLocalBaseUrl}/templates/unittest/postgresql-dc-unittest.yaml`, + { + param: { + NAME: phases[phase].name, + SUFFIX: phases[phase].suffix, + ENV_NAME: phases[phase].phase, + }, + }, + ), + ); //deploy backend unit test - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/unittest/backend-dc-unittest.yaml`, { - 'param': { - 'NAME': phases[phase].name, - 'SUFFIX': phases[phase].suffix, - 'VERSION': phases[phase].tag, - 'ENV_NAME': phases[phase].phase, - 'BACKEND_HOST_NAME': phases[phase].backendHost, - 'RABBITMQ_CLUSTER_NAME': 'rabbitmq-cluster', - 'CPU_REQUEST': phases[phase].backendCpuRequest, - 'CPU_LIMIT': '700m', - 'MEMORY_REQUEST': 
phases[phase].backendMemoryRequest, - 'MEMORY_LIMIT': phases[phase].backendMemoryLimit, - 'HEALTH_CHECK_DELAY': phases[phase].backendHealthCheckDelay, - 'REPLICAS': phases[phase].backendReplicas - } - })) - + objects = objects.concat( + oc.processDeploymentTemplate( + `${templatesLocalBaseUrl}/templates/unittest/backend-dc-unittest.yaml`, + { + param: { + NAME: phases[phase].name, + SUFFIX: phases[phase].suffix, + VERSION: phases[phase].tag, + ENV_NAME: phases[phase].phase, + BACKEND_HOST_NAME: phases[phase].backendHost, + RABBITMQ_CLUSTER_NAME: "rabbitmq-cluster", + CPU_REQUEST: phases[phase].backendCpuRequest, + CPU_LIMIT: "700m", + MEMORY_REQUEST: phases[phase].backendMemoryRequest, + MEMORY_LIMIT: phases[phase].backendMemoryLimit, + HEALTH_CHECK_DELAY: phases[phase].backendHealthCheckDelay, + REPLICAS: phases[phase].backendReplicas, + }, + }, + ), + ); } oc.applyRecommendedLabels( - objects, - phases[phase].name, - phase, - `${changeId}`, - phases[phase].instance, + objects, + phases[phase].name, + phase, + `${changeId}`, + phases[phase].instance, + ); + oc.importImageStreams( + objects, + phases[phase].tag, + phases.build.namespace, + phases.build.tag, ); - oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag); oc.applyAndDeploy(objects, phases[phase].instance); - }; diff --git a/.pipeline/lib/deploy.js b/.pipeline/lib/deploy.js index 8b5b0f54..7575410e 100755 --- a/.pipeline/lib/deploy.js +++ b/.pipeline/lib/deploy.js @@ -3,12 +3,14 @@ const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); const path = require("path"); //const KeyCloakClient = require('./keycloak'); -module.exports = settings => { +module.exports = (settings) => { const phases = settings.phases; const options = settings.options; const phase = options.env; const changeId = phases[phase].changeId; - const oc = new OpenShiftClientX(Object.assign({namespace: phases[phase].namespace}, options)); + const oc = new OpenShiftClientX( + Object.assign({ namespace: phases[phase].namespace }, options), + ); //add Valid Redirect URIs for the pull request to keycloak /************ @@ -18,52 +20,68 @@ module.exports = settings => { } *************/ - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift")); + const templatesLocalBaseUrl = oc.toFileUrl( + path.resolve(__dirname, "../../openshift"), + ); var objects = []; // The deployment of your cool app goes here ▼▼▼ - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/frontend/frontend-dc.yaml`, { - 'param': { - 'NAME': phases[phase].name, - 'SUFFIX': phases[phase].suffix, - 'VERSION': phases[phase].tag, - 'ENV_NAME': phases[phase].phase, - 'HOST_NAME': phases[phase].host, - 'CPU_REQUEST': phases[phase].frontendCpuRequest, - 'CPU_LIMIT': phases[phase].frontendCpuLimit, - 'MEMORY_REQUEST': phases[phase].frontendMemoryRequest, - 'MEMORY_LIMIT': phases[phase].frontendMemoryLimit, - 'REPLICAS': phases[phase].frontendReplicas - } - })) - - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/backend/backend-dc.yaml`, { - 'param': { - 'NAME': phases[phase].name, - 'SUFFIX': phases[phase].suffix, - 'VERSION': phases[phase].tag, - 'ENV_NAME': phases[phase].phase, - 'BACKEND_HOST_NAME': phases[phase].backendHost, - 'CPU_REQUEST': phases[phase].backendCpuRequest, - 'CPU_LIMIT': phases[phase].backendCpuLimit, - 'MEMORY_REQUEST': phases[phase].backendMemoryRequest, - 'MEMORY_LIMIT': phases[phase].backendMemoryLimit, - 'HEALTH_CHECK_DELAY': 
phases[phase].backendHealthCheckDelay, - 'REPLICAS': phases[phase].backendReplicas, - 'DB_HOST': phases[phase].dbHost, - 'MINIO_BUCKET_NAME': phases[phase].minioBucketName - } - })) + objects = objects.concat( + oc.processDeploymentTemplate( + `${templatesLocalBaseUrl}/templates/frontend/frontend-dc.yaml`, + { + param: { + NAME: phases[phase].name, + SUFFIX: phases[phase].suffix, + VERSION: phases[phase].tag, + ENV_NAME: phases[phase].phase, + HOST_NAME: phases[phase].host, + CPU_REQUEST: phases[phase].frontendCpuRequest, + CPU_LIMIT: phases[phase].frontendCpuLimit, + MEMORY_REQUEST: phases[phase].frontendMemoryRequest, + MEMORY_LIMIT: phases[phase].frontendMemoryLimit, + REPLICAS: phases[phase].frontendReplicas, + }, + }, + ), + ); + + objects = objects.concat( + oc.processDeploymentTemplate( + `${templatesLocalBaseUrl}/templates/backend/backend-dc.yaml`, + { + param: { + NAME: phases[phase].name, + SUFFIX: phases[phase].suffix, + VERSION: phases[phase].tag, + ENV_NAME: phases[phase].phase, + BACKEND_HOST_NAME: phases[phase].backendHost, + CPU_REQUEST: phases[phase].backendCpuRequest, + CPU_LIMIT: phases[phase].backendCpuLimit, + MEMORY_REQUEST: phases[phase].backendMemoryRequest, + MEMORY_LIMIT: phases[phase].backendMemoryLimit, + HEALTH_CHECK_DELAY: phases[phase].backendHealthCheckDelay, + REPLICAS: phases[phase].backendReplicas, + DB_HOST: phases[phase].dbHost, + MINIO_BUCKET_NAME: phases[phase].minioBucketName, + }, + }, + ), + ); oc.applyRecommendedLabels( - objects, - phases[phase].name, - phase, - `${changeId}`, - phases[phase].instance, + objects, + phases[phase].name, + phase, + `${changeId}`, + phases[phase].instance, + ); + oc.importImageStreams( + objects, + phases[phase].tag, + phases.build.namespace, + phases.build.tag, ); - oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag); oc.applyAndDeploy(objects, phases[phase].instance); - }; diff --git a/.pipeline/lib/keycloak.js b/.pipeline/lib/keycloak.js index 5310a10b..122eb75a 100644 --- a/.pipeline/lib/keycloak.js +++ b/.pipeline/lib/keycloak.js @@ -3,135 +3,131 @@ const axios = require("axios"); const _ = require("lodash"); //code reference https://github.com/bcgov/HMCR/blob/0.7/.pipeline/lib/keycloak.js module.exports = class KeyCloakClient { - constructor(settings, oc) { - this.phases = settings.phases; - this.options = settings.options; - this.oc = oc; - this.zevaHost = this.phases.dev.host; + constructor(settings, oc) { + this.phases = settings.phases; + this.options = settings.options; + this.oc = oc; + this.zevaHost = this.phases.dev.host; + } + + async init() { + this.getSecrets(); + + this.apiTokenPath = `/auth/realms/${this.realmId}/protocol/openid-connect/token`; + this.zevaPublicClientPath = `auth/admin/realms/${this.realmId}/clients/${this.zevaClientId}`; + + this.api = axios.create({ + baseURL: `https://${this.ssoHost}`, + }); + + const token = await this.getAccessToken(); + + this.api.defaults.headers.common = { + Authorization: `Bearer ${token}`, + }; + } + + getSecrets() { + const keycloakSecret = this.oc.raw("get", [ + "secret", + "zeva-keycloak", + "-o", + "json", + ]); + const secret = JSON.parse(keycloakSecret.stdout).data; + + this.clientId = Buffer.from(secret.clientId, "base64").toString(); + this.clientSecret = Buffer.from(secret.clientSecret, "base64").toString(); + this.zevaClientId = Buffer.from(secret.zevaPublic, "base64").toString(); + this.realmId = Buffer.from(secret.realmId, "base64").toString(); + this.ssoHost = Buffer.from(secret.host, 
"base64").toString(); + + if (!this.clientId || !this.clientSecret || !this.zevaClientId) + throw new Error( + "Unable to retrieve Keycloak service account info from OpenShift", + ); + } + + getAccessToken() { + return this.api + .post(this.apiTokenPath, "grant_type=client_credentials", { + headers: { "Content-Type": "application/x-www-form-urlencoded" }, + auth: { + username: this.clientId, + password: this.clientSecret, + }, + }) + .then(function (response) { + if (!response.data.access_token) + throw new Error( + "Unable to retrieve Keycloak service account access token", + ); + + return Promise.resolve(response.data.access_token); + }); + } + + async getUris() { + const response = await this.api.get(this.zevaPublicClientPath); + + const data = { ...response.data }; + const redirectUris = data.redirectUris; + + return { data, redirectUris }; + } + + async addUris() { + await this.init(); + + console.log("Attempting to add RedirectUri and WebOrigins"); + + const { data, redirectUris } = await this.getUris(); + + const putData = { id: data.id, clientId: data.clientId }; + + const hasRedirectUris = redirectUris.find((item) => + item.includes(this.zevaHost), + ); + + if (!hasRedirectUris) { + redirectUris.push(`https://${this.zevaHost}/*`); + putData.redirectUris = redirectUris; } - async init() { - - this.getSecrets(); - - this.apiTokenPath = `/auth/realms/${this.realmId}/protocol/openid-connect/token`; - this.zevaPublicClientPath = `auth/admin/realms/${this.realmId}/clients/${this.zevaClientId}`; - - this.api = axios.create({ - baseURL: `https://${this.ssoHost}` - }); - - const token = await this.getAccessToken(); - - this.api.defaults.headers.common = { - Authorization: `Bearer ${token}` - }; - } - - getSecrets() { - const keycloakSecret = this.oc.raw("get", [ - "secret", - "zeva-keycloak", - "-o", - "json" - ]); - const secret = JSON.parse(keycloakSecret.stdout).data; - - this.clientId = Buffer.from(secret.clientId, "base64").toString(); - this.clientSecret = Buffer.from(secret.clientSecret, "base64").toString(); - this.zevaClientId = Buffer.from(secret.zevaPublic, "base64").toString(); - this.realmId = Buffer.from(secret.realmId, "base64").toString(); - this.ssoHost = Buffer.from(secret.host, "base64").toString(); - - if (!this.clientId || !this.clientSecret || !this.zevaClientId) - throw new Error( - "Unable to retrieve Keycloak service account info from OpenShift" - ); - } - - getAccessToken() { - - return this.api - .post(this.apiTokenPath, "grant_type=client_credentials", { - headers: { "Content-Type": "application/x-www-form-urlencoded" }, - auth: { - username: this.clientId, - password: this.clientSecret - } - }) - .then(function(response) { - if (!response.data.access_token) - throw new Error( - "Unable to retrieve Keycloak service account access token" - ); - - return Promise.resolve(response.data.access_token); - }); - } - - async getUris() { - - const response = await this.api.get(this.zevaPublicClientPath); - - const data = { ...response.data }; - const redirectUris = data.redirectUris; - - return { data, redirectUris }; + if (!hasRedirectUris) { + this.api + .put(this.zevaPublicClientPath, putData) + .then(() => console.log("RedirectUri and WebOrigins added.")); + } else { + console.log("RedirectUri and WebOrigins add skipped."); } + } - async addUris() { - await this.init(); + async removeUris() { + await this.init(); - console.log("Attempting to add RedirectUri and WebOrigins"); + console.log("Attempting to remove RedirectUri and WebOrigins"); - const { data, 
redirectUris} = await this.getUris(); + const { data, redirectUris } = await this.getUris(); - const putData = { id: data.id, clientId: data.clientId }; + const putData = { id: data.id, clientId: data.clientId }; - const hasRedirectUris = redirectUris.find(item => - item.includes(this.zevaHost) - ); + const hasRedirectUris = redirectUris.find((item) => + item.includes(this.zevaHost), + ); - if (!hasRedirectUris) { - redirectUris.push(`https://${this.zevaHost}/*`); - putData.redirectUris = redirectUris; - } - - if (!(hasRedirectUris)) { - this.api - .put(this.zevaPublicClientPath, putData) - .then(() => console.log("RedirectUri and WebOrigins added.")); - } else { - console.log("RedirectUri and WebOrigins add skipped."); - } + if (hasRedirectUris) { + putData.redirectUris = redirectUris.filter( + (item) => !item.includes(this.zevaHost), + ); } - async removeUris() { - await this.init(); - - console.log("Attempting to remove RedirectUri and WebOrigins"); - - const { data, redirectUris } = await this.getUris(); - - const putData = { id: data.id, clientId: data.clientId }; - - const hasRedirectUris = redirectUris.find(item => - item.includes(this.zevaHost) - ); - - if (hasRedirectUris) { - putData.redirectUris = redirectUris.filter( - item => !item.includes(this.zevaHost) - ); - } - - if (hasRedirectUris) { - this.api - .put(this.zevaPublicClientPath, putData) - .then(() => console.log("RedirectUri and WebOrigins removed.")); - } else { - console.log("RedirectUri and WebOrigins remove skipped."); - } - + if (hasRedirectUris) { + this.api + .put(this.zevaPublicClientPath, putData) + .then(() => console.log("RedirectUri and WebOrigins removed.")); + } else { + console.log("RedirectUri and WebOrigins remove skipped."); } + } }; diff --git a/README.md b/README.md index 7b1a67cf..1bee4d3e 100644 --- a/README.md +++ b/README.md @@ -1,44 +1,48 @@ # CTHUB + The Clean Transportation Data Hub provides an evidence base for the Clean Transportation Branch through data storage, analysis and visualization, that improves decision making to increase energy efficiency and de-carbonise the transportation system. It aims to be the most comprehensive, reliable and accessible data management system for clean transportation in the world. 
# Docker Instructions
+
 - Make sure Docker is installed and running
-- In your terminal, go to your project folder and execute the following: 
-  - ```docker-compose up```
+- In your terminal, go to your project folder and execute the following:
+  - `docker-compose up`
 
 ## Useful Docker Commands
 
 - To access postgres:
+
   - Go to your project folder in your terminal
   - Execute the following:
-    - ```docker-compose exec db psql -U postgres```
+    - `docker-compose exec db psql -U postgres`
   - Some notes about the structure of the command
     - docker-compose exec - this is your standard command to execute something within the context of the docker-compose yml file
     - db - this is the service you want to execute your command in
     - psql -U postgres - execute psql with the default user, postgres
 
 - To access the backend: (to do migrations and other stuff with the backend)
+
   - Go to your project folder in your terminal
   - Execute the following:
-    - ```docker-compose exec api bash```
+    - `docker-compose exec api bash`
   - Here you can do your standard django stuff like:
-    - ```python manage.py makemigrations```
-    - ```python manage.py migrate```
+    - `python manage.py makemigrations`
+    - `python manage.py migrate`
 
 - To access the frontend: (to install/update a package, etc)
+
   - Go to your project folder in your terminal
   - Execute the following:
+
-    - ```docker-compose exec web bash```
+    - `docker-compose exec web bash`
   - This is where you can make changes to your package.json
   - You can technically make changes to your packages without going into your container, but you'll need npm installed on your system
-
 - To run in testing mode
-  if you don't have docker-compose-local-dev.yml locally, create a new file and
-  add the contents from docker-compose plus a line for: - KEYCLOAK_TESTING=True
-  in api environment
-
+  if you don't have docker-compose-local-dev.yml locally, create a new file and
+  add the contents of docker-compose.yml plus a line for: - KEYCLOAK_TESTING=True
+  in the api environment
+
 to run using this docker file:
 docker-compose -f docker-compose-local-dev.yml up
@@ -46,26 +50,29 @@ The Clean Transportation Data Hub provides an evidence base for the Clean Transp
 and uses the user table to get permissions
 
 # Rebasing Guide
+
 - To rebase your branch onto the latest release branch:
-  - ```git fetch upstream```
-  - ```git checkout your_branch```
-  - ```git rebase --onto A B```
-  - Where `upstream` is the remote containing the release branch, and `A` is the hash of the latest commit to the release branch, and `B` is the hash of the commit in `your_branch` such that every commit after `B` ought to be rebased onto the release branch.
-  - If you run into conflicts while rebasing, you can resolve them in your IDE, and `git add` the resolved changes before finishing the rebase (committing).
-  - The rebased commits will have different hashes than the old ones, so if you previously pushed `your_branch` to a remote you will have to `git push --force` in order not to end up with additional commits in your remote branch.
-  - On Github, you can modify the base branch of a PR if you're rebasing from a branch based on a previous release branch to the latest release branch.
+- `git fetch upstream`
+- `git checkout your_branch`
+- `git rebase --onto A B`
+- Where `upstream` is the remote containing the release branch, and `A` is the hash of the latest commit to the release branch, and `B` is the hash of the commit in `your_branch` such that every commit after `B` ought to be rebased onto the release branch.
+- If you run into conflicts while rebasing, you can resolve them in your IDE, and `git add` the resolved changes before finishing the rebase (committing).
+- The rebased commits will have different hashes than the old ones, so if you previously pushed `your_branch` to a remote you will have to `git push --force` in order not to end up with additional commits in your remote branch.
+- On GitHub, you can modify the base branch of a PR if you're rebasing from a branch based on a previous release branch to the latest release branch.

# Metabase
+
- Locally, create a database to store metabase's internals, and use/modify `metabase.env`, django's `settings.DATABASES` and `settings.DATABASE_ROUTERS` to point to said database.
- You can create django data migrations to insert your custom queries into the metabase application database.
- To create a data migration within the metabase django app:
-- ```python manage.py makemigrations --empty metabase```
+- `python manage.py makemigrations --empty metabase`
- Then, using `RunPython` and django's `QuerySet` API, you may read/insert/update/delete data from metabase's application database.
- For custom queries, the internal metabase table of interest would probably be `report_card` (the associated model is `ReportCard`).
- To make your `RunPython` "script" cleaner, consider putting the actual queries themselves in separate sql files and reading from those in `RunPython`.
- To uncouple metabase from django, simply remove metabase from `settings.INSTALLED_APPS`.

# Updating packages
+
- From time to time, we may become aware of package updates (mainly the packages in package.json (frontend) and requirements.txt (backend)).
- Tools like Dependabot (https://github.com/dependabot) may raise PRs that update these packages.
- If the package that can be updated is an npm package and is a transitive dependency (a dependency of an immediate dependency), we can implement the update using `overrides` (https://docs.npmjs.com/cli/v10/configuring-npm/package-json#overrides).
@@ -73,27 +80,27 @@ The Clean Transportation Data Hub provides an evidence base for the Clean Transp
- When an entire image is scanned by some tool, there may be deeper, OS level dependencies that show as being critically out of date/vulnerable; in cases like this, if an updated image is not yet available, there are usually `alpine` versions of images that simply don't include many of these dependencies; whether they will work for our purposes is another question.

# License
-The code is a fork from Richard's personal project. Please do not clone, copy or replicate this project unless you're authorized to do so. 
+The code is a fork from Richard's personal project. Please do not clone, copy or replicate this project unless you're authorized to do so.

# List of Dev Work | What to do before bringing in a new ticket into a Sprint

-This is a list that was created on 2023-02-01 with all Zelda Devs to provide alternative work instead of bringing in a new ticket. 
+This is a list that was created on 2023-02-01 with all Zelda Devs to provide alternative work instead of bringing in a new ticket.

-**Team Rule* Do not bring in ticket After Friday
+**Team Rule:** Do not bring in a ticket after Friday

-1. Help another Dev - see if other Devs need help to finish their ticket 
+1. Help another Dev - see if other Devs need help to finish their ticket

-2. PR Reviews – linked to the task above 
+2. PR Reviews – linked to the task above

-3. Writing additional tests – for both tront and back end 
+3. 
Writing additional tests – for both front and back end

-4. Take a look at Tech Debt tickets - If we bring in tickets let's bring in Tech Debt first 
+4. Take a look at Tech Debt tickets - If we bring in tickets let's bring in Tech Debt first

-5. Learning time: 
+5. Learning time:

-- Take the opportunity to familiarize yourself with business logic, tech (anything around work we do) 
+- Take the opportunity to familiarize yourself with business logic, tech (anything around work we do)

-- New learning and applying it to our work 
+- New learning and applying it to our work

-- Innovation work 
+- Innovation work
diff --git a/charts/cthub-spilo/Readme.md b/charts/cthub-spilo/Readme.md
index 71d31279..ff7fa6d6 100644
--- a/charts/cthub-spilo/Readme.md
+++ b/charts/cthub-spilo/Readme.md
@@ -1,27 +1,33 @@
 ## Before running Helm

-* Create secret cthub-patroni-admin
-  * Create the secret by using cthub/openshift/templates/spilo/cthub-patroni-admin.yaml, the three passwords are generated randomly
-* Create secret cthub-patroni-app
-  * Create the secret by using cthub/openshift-v4/templates/spilo/cthub-patroni-app.yaml, the three password fields must be in sync with the existing secret patroni-dev
-  * It contains: app-db-name, app-db-password, app-db-username, metabaseuser-name, metabaseuser-password
-  * The replication- and superuser- are not needed
-  * If this secret is aleady existed, please verify the password fields
+- Create secret cthub-patroni-admin

-* Create Object Storage secret for database continuous backup, cthub-object-storage
-  * Create the secret by using cthub/openshift-v4/templates/object-storage/object-storage-secret.yaml
-  * The secret should have been created, verify it by using CyberDuck
+  - Create the secret by using cthub/openshift/templates/spilo/cthub-patroni-admin.yaml, the three passwords are generated randomly

-* Create secret cthub-db-backup-s3
-  * It includes AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_ENDPOINT
-  * The values are in sync with secret cthub-object-storage
+- Create secret cthub-patroni-app

-* Verify values-dev.yaml. Create the bucket on object storage if needed
+  - Create the secret by using cthub/openshift-v4/templates/spilo/cthub-patroni-app.yaml, the three password fields must be in sync with the existing secret patroni-dev
+  - It contains: app-db-name, app-db-password, app-db-username, metabaseuser-name, metabaseuser-password
+  - The replication- and superuser- are not needed
+  - If this secret already exists, please verify the password fields

-* Add new KNPs templates/knp/knp-env-pr-new-cthub-spilo.yaml
-  * oc process -f ./knp-env-pr-new-cthub-spilo.yaml ENVIRONMENT=test | oc apply -f - -n 30b186-dev
+- Create Object Storage secret for database continuous backup, cthub-object-storage
+
+  - Create the secret by using cthub/openshift-v4/templates/object-storage/object-storage-secret.yaml
+  - The secret should have been created, verify it by using CyberDuck
+
+- Create secret cthub-db-backup-s3
+
+  - It includes AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_ENDPOINT
+  - The values are in sync with secret cthub-object-storage
+
+- Verify values-dev.yaml. Create the bucket on object storage if needed
+
+- Add new KNPs templates/knp/knp-env-pr-new-cthub-spilo.yaml
+  - oc process -f ./knp-env-pr-new-cthub-spilo.yaml ENVIRONMENT=test | oc apply -f - -n 30b186-dev

## Helm command
+
helm install -n 30b186-dev -f ./values-dev.yaml cthub-spilo .
helm uninstall -n 30b186-dev cthub-spilo

@@ -32,51 +38,59 @@ helm uninstall -n 30b186-dev cthub-spilo

### Run a final backup on backup container

### Create cthub database user and database
-* Login to the cthub-spilo leader pod
-* If the username contains upper case letters, should be double quoted
-  * create user for cthub database, the username should be the same on v10 otherwise the restore may encounter issue
-    * create user [username] with password '[password]'
-    * The password can be found in secret cthub-patroni-app
-  * create cthub database
-    * create database cthub owner [username] ENCODING 'UTF8' LC_COLLATE = 'en_US.UTF-8' LC_CTYPE = 'en_US.UTF-8'
+
+- Log in to the cthub-spilo leader pod
+- If the username contains upper case letters, it should be double quoted
+  - create user for cthub database, the username should be the same as on v10 otherwise the restore may encounter issues
+    - create user [username] with password '[password]'
+    - The password can be found in secret cthub-patroni-app
+  - create cthub database
+    - create database cthub owner [username] ENCODING 'UTF8' LC_COLLATE = 'en_US.UTF-8' LC_CTYPE = 'en_US.UTF-8'
+
### Reset postgresql logging
-* login cthub-spilo leader pod, run the following psql to only keep 24 hours log files, otherwise they take too much space
-  ALTER SYSTEM SET log_filename='postgresql-%H.log';
-  ALTER SYSTEM SET log_connections='off';
-  ALTER SYSTEM SET log_disconnections='off';
-  ALTER SYSTEM SET log_checkpoints='off';
-  select pg_reload_conf();
+
+- log in to the cthub-spilo leader pod, run the following psql commands to keep only 24 hours of log files, otherwise they take too much space
  ALTER SYSTEM SET log_filename='postgresql-%H.log';
  ALTER SYSTEM SET log_connections='off';
  ALTER SYSTEM SET log_disconnections='off';
  ALTER SYSTEM SET log_checkpoints='off';
  select pg_reload_conf();
+
### Create metabase user
-* login cthub-spilo leader pod
-  CREATE USER metabaseuser WITH PASSWORD 'xxxxxx';
-  GRANT CONNECT ON DATABASE cthub TO metabaseuser;
-  GRANT USAGE ON SCHEMA public TO metabaseuser;
-  GRANT SELECT ON ALL TABLES IN SCHEMA public TO metabaseuser;
-  ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO metabaseuser;
-  verify permissions are granted: select * from information_schema.role_table_grants where grantee='metabaseuser';
+
+- log in to the cthub-spilo leader pod
  CREATE USER metabaseuser WITH PASSWORD 'xxxxxx';
  GRANT CONNECT ON DATABASE cthub TO metabaseuser;
  GRANT USAGE ON SCHEMA public TO metabaseuser;
  GRANT SELECT ON ALL TABLES IN SCHEMA public TO metabaseuser;
  ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO metabaseuser;
  verify permissions are granted: select * from information_schema.role_table_grants where grantee='metabaseuser';

## Backup the existing v10 database and restore to v14 cluster
-* Make sure the application is stopped
-* Login to patroni-dev leader pod
-  * make an empty dir /home/postgres/migration and cd into it
-  * backup cthub database: pg_dump cthub > cthub.sql
-* Restore cthub database
-  * psql cthub < ./cthub.sql >> ./restore.log 2>&1
-  * verify the restore.log when complete
-
-* Point the applications to v14 cluster, update the enviuronment variables for
-  * backend: DATABASE_SERVICE_NAME, POSTGRESQL_SERVICE_HOST
-  * celery: DATABASE_SERVICE_NAME
-  * scan-handler: DATABASE_SERVICE_NAME
-* Bring down the v10 cluster
-* Bring down the maintenance page
-* Bring up the cthub appliation
-* Update patroni backup to only backup minio data
-* Update metabase connection from CTHUB
-* Update 
dbServiceName to be cthub-spilo in .pipeline/lib/config.js
+
+- Make sure the application is stopped
+- Log in to the patroni-dev leader pod
+  - make an empty dir /home/postgres/migration and cd into it
+  - backup cthub database: pg_dump cthub > cthub.sql
+- Restore cthub database
+
+  - psql cthub < ./cthub.sql >> ./restore.log 2>&1
+  - verify restore.log when complete
+
+- Point the applications to the v14 cluster, update the environment variables for
+  - backend: DATABASE_SERVICE_NAME, POSTGRESQL_SERVICE_HOST
+  - celery: DATABASE_SERVICE_NAME
+  - scan-handler: DATABASE_SERVICE_NAME
+- Bring down the v10 cluster
+- Bring down the maintenance page
+- Bring up the cthub application
+- Update patroni backup to only backup minio data
+- Update metabase connection from CTHUB
+- Update dbServiceName to be cthub-spilo in .pipeline/lib/config.js

## Notes for uninstalling cthub-spilo when needed
-* After the helm uninstall command, remember to remove the followings:
-  * The two configmaps: cthub-spilo-config, cthub-spilo-leader
-  * The PVCs storage-volume-cthub-spilo-*
-  * The backup bucket in object storage
+
+- After the helm uninstall command, remember to remove the following:
+  - The two configmaps: cthub-spilo-config, cthub-spilo-leader
+  - The PVCs storage-volume-cthub-spilo-*
+  - The backup bucket in object storage
diff --git a/charts/spilo/docs/restore.md b/charts/spilo/docs/restore.md
index 4f806707..81679496 100644
--- a/charts/spilo/docs/restore.md
+++ b/charts/spilo/docs/restore.md
@@ -8,11 +8,14 @@ Make sure there are no connections to the database cluster.

### Stop Patroni failover and shutdown Postgresql database

-1. On the leader pod, disable the failover
+1. On the leader pod, disable the failover
+
```
$ patronictl pause [patroni cluster name]
```
-2. On the the secondary pods, stop the postgresql databse
+
+2. On the secondary pods, stop the postgresql database
+
```
$ pg_ctl stop
```

3. On the leader pod, stop the postgresql database by running the same command as above

4. Make sure all pods are stopped and maintenance mode is on
+
```
$ patronictl list
```

Retrieve the latest base backup and all archived WAL files.

1. On the leader pod, fetch the latest backup from S3 Object Storage
+
```
$ envdir /run/etc/wal-e.d/env wal-g backup-fetch /home/postgres/pgdata/pgroot/latest-backup LATEST
It copies the latest base backup base_0000..0037 according to the sample below.
```
+
![archived files on S3 storage](./s3.png)
-
+
2. Rename data folder to data-ori and rename latest-backup to data
+
```
$ cd /home/postgres/pgdata/pgroot
$ mv data data-ori
@@ -45,7 +52,8 @@ $ mv latest-backup data

3. Download, unzip and copy the files under wal_005 folder to /home/postgres/pgdata/pgroot/allWALs

4. Copy the files that didn't have a chance to be archived
-Compare the files under /home/postgres/pgdata/pgroot/data-ori/pg_wal and the filder under wal_005 folder and copy the additional files to /home/postgres/pgdata/pgroot/allWALs if there are any
+ Compare the files under /home/postgres/pgdata/pgroot/data-ori/pg_wal and the folder under wal_005 and copy the additional files to /home/postgres/pgdata/pgroot/allWALs if there are any
+
```
According to the sample, the final files in allWALs folder should be
000000010000000000000037.00000028.backup -- from S3 storage

@@ -58,16 +66,19 @@ According to the sample, the final files in allWALs folder should be

### Restore

1. 
Create recovery.signal file
+
```
$ touch /home/postgres/pgdata/pgroot/data/recovery.signal
```

2. Add the recovery command to postgresql.conf
+
```
restore_command = 'cp /home/postgres/pgdata/pgroot/allWALs/%f "%p"'
```

3. Start postgresql on the leader pod
+
```
$ pg_ctl start
The file recovery.signal will be removed
@@ -78,10 +89,10 @@ The file recovery.signal will be removed

5. Run patronictl list, the maintenance mode is still on

6. Resume the patroni cluster on the leader pod
+
```
$ patronictl resume
```
+
Now the patroni cluster is fully restored, log in to the database to verify the latest changes

The restore_command in postgresql.conf is removed automatically.
-
-
diff --git a/django/README.md b/django/README.md
index 6bb2b6a2..5e497ad1 100644
--- a/django/README.md
+++ b/django/README.md
@@ -1,7 +1,9 @@
 # Backend

 ## Database Migrations
+
 Create migration
+
 ```bash
 # Log in to the api docker
 docker-compose exec api bash
@@ -18,38 +20,44 @@ docker-compose exec db psql -U postgres
 ```

 If you need to change a migration you can back up by doing the following:
+
 1. List the migrations to find where you want to go back to
-> `python manage.py showmigrations`
+   > `python manage.py showmigrations`
 2. Move migration to the appropriate state
-> `python manage.py migrate --fake api 0003_name_of_migration_before_one_to_redo`
+   > `python manage.py migrate --fake api 0003_name_of_migration_before_one_to_redo`
 3. Delete the migration file
 4. Delete table if necessary
 5. Re-run migration
-> `python manage.py makemigrations`
-
+   > `python manage.py makemigrations`

 ## Data Loads
+
 Copy the spreadsheet into the _api_ docker container.
+
 ```bash
 docker cp 'EV_Fast-Charging Stations_20210520.xlsx' cthub_api_1:/tmp/
 ```
+
 This can also be done by temporarily placing the Excel file in the _django_ folder. This location is mounted onto the container.

 Log into the docker container and run the following command.
+
 ```bash
 python manage.py import_charger_rebates '/tmp/EV_Fast-Charging Stations_20210520.xlsx'
 ```

 ## Fixtures
+
 If docker doesn't load your fixtures and the dataset dropdown list is empty, use
 the same approach as above to load the fixtures

 docker-compose exec api bash
-python manage.py loaddata api/fixtures/0001_add_ldv_rebates_datasets.json
-or
-python manage.py loaddata api/fixtures/00*
+python manage.py loaddata api/fixtures/0001_add_ldv_rebates_datasets.json
+or
+python manage.py loaddata api/fixtures/00*

 ## Creating User Account
+
 After running all the fixtures to create the dataset dropdown list and the user_permissions table,
 you will need to run a few SQL commands to allow your account to upload documents locally.

@@ -58,4 +66,4 @@ insert into user_permission (create_user, permission_id, user_id) values ('test'
 insert into user_permission (create_user, permission_id, user_id) values ('test', 2, 1);

 Only after running these will you be able to upload into CTHUB locally.
-If you're encountering errors make sure you've run the fixture for creating the user_permission table and that you're not missing any fields in SQL. \ No newline at end of file
+If you're encountering errors, make sure you've run the fixture for creating the user_permission table and that you're not missing any fields in SQL.
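
Putting the fixture and permission steps above together, here is a minimal sketch of the local upload setup. It assumes the default compose service names (`api`, `db`), the default `postgres` database, and that your account is the first row of the user table (`user_id` 1) — adjust these to match your local settings:

```bash
# Hypothetical end-to-end local setup; service and database names are assumptions.
docker-compose exec api python manage.py loaddata api/fixtures/00*   # load all fixtures
docker-compose exec db psql -U postgres -c \
  "insert into user_permission (create_user, permission_id, user_id) values ('test', 1, 1);"
docker-compose exec db psql -U postgres -c \
  "insert into user_permission (create_user, permission_id, user_id) values ('test', 2, 1);"
```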
diff --git a/frontend/src/uploads/UploadContainer.js b/frontend/src/uploads/UploadContainer.js index a30cc902..0856d9c8 100644 --- a/frontend/src/uploads/UploadContainer.js +++ b/frontend/src/uploads/UploadContainer.js @@ -64,18 +64,17 @@ const UploadContainer = () => { errors: 0, warnings: 0, }; - + issueArray.forEach((issue) => { - Object.keys(issue).forEach((column) => { const errorDetails = issue[column]; - + Object.keys(errorDetails).forEach((errorType) => { const severity = errorDetails[errorType].Severity; const expectedType = errorDetails[errorType]["Expected Type"]; const rows = errorDetails[errorType].Rows; const rowCount = rows.length; - + if (severity === "Error") { totalIssueCount.errors += rowCount; if (!groupedErrors[column]) { @@ -106,11 +105,9 @@ const UploadContainer = () => { }); }); }); - + return { groupedErrors, groupedWarnings, totalIssueCount }; }; - - const showError = (error) => { const { response: errorResponse } = error; @@ -153,15 +150,13 @@ const UploadContainer = () => { Promise.all(uploadPromises) .then((responses) => { - const errorCheck = responses.some( - (response) => !response.data.success - ); + const errorCheck = responses.some((response) => !response.data.success); setAlertSeverity(errorCheck ? "error" : "success"); const message = responses .map( (response) => - `${response.data.message}${response.data.errors ? "\nErrors: " + response.data.errors.join("\n") : ""}` + `${response.data.message}${response.data.errors ? "\nErrors: " + response.data.errors.join("\n") : ""}`, ) .join("\n"); setAlert(true); @@ -210,7 +205,6 @@ const UploadContainer = () => { confirmAction: () => setOpenDialog(false), }); setOpenDialog(true); - } }) .catch((error) => { @@ -265,14 +259,14 @@ const UploadContainer = () => { }; const handleConfirmDataInsert = () => { - setGroupedWarnings({}) - setGroupedErrors({}) - setTotalIssueCount({}) + setGroupedWarnings({}); + setGroupedErrors({}); + setTotalIssueCount({}); setOpenDialog(false); setAlert(false); setAlertContent(""); doUpload(false); // Upload with the checkForWarnings flag set to false! - setUploadFiles([]) + setUploadFiles([]); }; const handleReplaceDataConfirm = () => { @@ -304,57 +298,57 @@ const UploadContainer = () => { ) : null; - return ( -
-
- <> - - - {(totalIssueCount.errors > 0 || totalIssueCount.warnings > 0) && ( - - - - )} - - +
+ <> + + + {(totalIssueCount.errors > 0 || totalIssueCount.warnings > 0) && ( + + - {adminUser && ( - - - - )} - - -
+ )} + + + + {adminUser && ( + + + + )} +
+
- ); - }; +
+ ); +}; export default withRouter(UploadContainer); diff --git a/frontend/src/uploads/components/UploadIssuesDetail.js b/frontend/src/uploads/components/UploadIssuesDetail.js index f2d93ba3..59ce7d65 100644 --- a/frontend/src/uploads/components/UploadIssuesDetail.js +++ b/frontend/src/uploads/components/UploadIssuesDetail.js @@ -38,7 +38,9 @@ const UploadIssuesDetail = ({ type, issues, totalIssueCount, msg }) => { Column: {column} {Object.keys(issues[column]).map((errorType, index) => (
-
{type.charAt(0).toUpperCase() + type.slice(1)} Name: {errorType}
+
+ {type.charAt(0).toUpperCase() + type.slice(1)} Name: {errorType} +
Expected value:{" "} {issues[column][errorType].ExpectedType || diff --git a/frontend/src/users/UsersContainer.js b/frontend/src/users/UsersContainer.js index 745d31f9..e4a7c4a7 100644 --- a/frontend/src/users/UsersContainer.js +++ b/frontend/src/users/UsersContainer.js @@ -57,7 +57,7 @@ const UsersContainer = (props) => { userToChange.user_permissions[permissionType] = checked; }), ); - setSaveButtonEnabled(true) + setSaveButtonEnabled(true); }, []); const handleDeleteUserClick = (idir) => { diff --git a/frontend/src/users/components/UsersPage.js b/frontend/src/users/components/UsersPage.js index 20304553..a0e124c5 100644 --- a/frontend/src/users/components/UsersPage.js +++ b/frontend/src/users/components/UsersPage.js @@ -23,7 +23,7 @@ const UsersPage = (props) => { newUser, setMessage, handleXClick, - saveButtonEnabled + saveButtonEnabled, } = props; const userRow = (user) => { diff --git a/openshift/README.md b/openshift/README.md index 7376164b..2f8da02f 100644 --- a/openshift/README.md +++ b/openshift/README.md @@ -1,30 +1,36 @@ # Openshift ## Grant Developer's access -* Create the edit/admin RoleBinding for developers GitHub account, + +- Create the edit/admin RoleBinding for developers GitHub account, - kind: User apiGroup: rbac.authorization.k8s.io name: @github - + ## Add role to group otherwise dev, test and prod can't pull images from tools + oc policy add-role-to-group system:image-puller system:serviceaccounts:30b186-dev -n 30b186-tools oc policy add-role-to-group system:image-puller system:serviceaccounts:30b186-test -n 30b186-tools oc policy add-role-to-group system:image-puller system:serviceaccounts:30b186-prod -n 30b186-tools ## Keycloak + openshift/templates/keycloak/README.md ## Minio + openshift/templates/minio/README.md ## Patroni + openshift/templates/patroni-2.1.1/README.md ## Backend + openshift/templates/backend/README.md ## Frontend + openshift/templates/frontend/README.md ## Backup Container - diff --git a/openshift/templates/backend/README.md b/openshift/templates/backend/README.md index f6b579aa..be31c01e 100644 --- a/openshift/templates/backend/README.md +++ b/openshift/templates/backend/README.md @@ -1,4 +1,5 @@ ### Files included + * backend-bc.yaml backend build config * backend-dc.yaml backend deployment config * django-secret-template.yaml create template.django-secret, it is not in pipeline and needs to run independently, it is used by backend-dc.yaml diff --git a/openshift/templates/backup-container-2.3.3/openshift/README.md b/openshift/templates/backup-container-2.3.3/openshift/README.md index 7f63f1ac..074b778a 100644 --- a/openshift/templates/backup-container-2.3.3/openshift/README.md +++ b/openshift/templates/backup-container-2.3.3/openshift/README.md @@ -1,44 +1,45 @@ ### Files included -* backup-build.yaml build backup container image -* backup-config.yaml create backup-conf configmap -* backup-deploy.yaml deploy backup container + +- backup-build.yaml build backup container image +- backup-config.yaml create backup-conf configmap +- backup-deploy.yaml deploy backup container ## Setup Backup container,use Test as example + 1. 
Build patroni-backup image
   oc -n 30b186-tools process -f ./backup/backup-build.yaml \
   -p NAME=patroni-backup GIT_REPO_URL=https://github.com/BCDevOps/backup-container.git GIT_REF=2.3.3 OUTPUT_IMAGE_TAG=2.3.3 \
   | oc -n 30b186-tools apply -f -

2. Create the configmap in env project

3. add to ./config/backup.conf, 9pm run backup, 10pm run verification
   postgres=patroni-master-test:5432/cthub
   postgres=patroni-master-test-metabase:5432/metabase
   0 1 * * * default ./backup.sh -s
   0 7 * * * default ./backup.sh -s
   0 13 * * * default ./backup.sh -s
   0 19 * * * default ./backup.sh -s
   0 22 * * * default ./backup.sh -s -v all

4. create deployment config for backup container
   4.1 for test
   oc -n 30b186-test process -f ./backup-deploy.yaml \
   -p NAME=patroni-backup \
   -p ENV_NAME_UPPERCASE=TEST \
   -p ENV_NAME_LOWERCASE=test \
   -p BACKUP_STRATEGY=rolling \
   -p BACKUP_DIR=/backups/patroni-backup/ \
   -p DAILY_BACKUPS=28 \
   -p WEEKLY_BACKUPS=16 \
   -p MONTHLY_BACKUPS=4 \
   -p CONFIG_MAP_NAME=backup-conf \
   -p CONFIG_MOUNT_PATH=/ \
   -p BACKUP_VOLUME_NAME=backup-test \
   -p VERIFICATION_VOLUME_NAME=backup-verification-test \
   -p VERIFICATION_VOLUME_MOUNT_PATH=/var/lib/pgsql/data \
   -p CPU_REQUEST=35m \
   -p CPU_LIMIT=70m \
   -p MEMORY_REQUEST=50Mi \
   -p MEMORY_LIMIT=100Mi \
   | oc create -f - -n 30b186-test
diff --git a/openshift/templates/backup-container-2.6.1/cronjob.md b/openshift/templates/backup-container-2.6.1/cronjob.md
index 25b81607..e8fee3d3 100644
--- a/openshift/templates/backup-container-2.6.1/cronjob.md
+++ b/openshift/templates/backup-container-2.6.1/cronjob.md
@@ -1,12 +1,14 @@
 # Cronjob prerequisites
+
 Backup PVC: backup
 KNP: allow CronJob to connect to Spilo

 # Create database backup cronjob
+
 oc process -f ./db-backup-cronjob-2.6.1.yaml \
 JOB_NAME=cthub-db-backup \
 JOB_PERSISTENT_STORAGE_NAME=backup \
-SCHEDULE="00 07,21 * * *" \
+SCHEDULE="00 07,21 * * *" \
 TAG_NAME=2.6.1 \
 DATABASE_SERVICE_NAME=cthub-test-crunchy-replicas \
 DATABASE_DEFAULT_PORT=5432 \
@@ -19,4 +21,3 @@ BACKUP_DIR=/backups \
 DAILY_BACKUPS=30 \
 WEEKLY_BACKUPS=8 \
 MONTHLY_BACKUPS=2 | oc apply -f - -n 30b186-test
-
diff --git a/openshift/templates/crunchydb/readme.md b/openshift/templates/crunchydb/readme.md
index 5e6205d2..4e20782d 100644
--- a/openshift/templates/crunchydb/readme.md
+++ b/openshift/templates/crunchydb/readme.md
@@ -2,30 +2,31 @@

## Create 
CrunchyDB Cluster

-Create Cluster 
+Create Cluster

## Migrate metabase database to CrunchyDB

### Create metabase user under psql prompt
-create user ******** password '********';
+
+create user ******** password '********';
create database metabase owner metabaseuser ENCODING 'utf8' LC_COLLATE = 'en_US.utf-8' LC_CTYPE = 'en_US.utf-8';
CREATE EXTENSION IF NOT EXISTS citext WITH SCHEMA public;
-CREATE EXTENSION IF NOT EXISTS set_user WITH SCHEMA public; 
+CREATE EXTENSION IF NOT EXISTS set_user WITH SCHEMA public;

### Dump and restore metabase data

pg_dump -f metabase-data.sql -n public -d metabase
-psql metabase < ./matabase-data.sql >> ./metabase-restore.log 2>&1 
+psql metabase < ./metabase-data.sql >> ./metabase-restore.log 2>&1

-## Migrate cthub database to CrunchyDB 
+## Migrate cthub database to CrunchyDB

-create user ******** password '********';
+create user ******** password '********';
create database cthub owner cthubpqaitvng ENCODING 'utf8' LC_COLLATE = 'en_US.utf-8' LC_CTYPE = 'en_US.utf-8';
CREATE EXTENSION IF NOT EXISTS citext WITH SCHEMA public;
-CREATE EXTENSION IF NOT EXISTS set_user WITH SCHEMA public; 
+CREATE EXTENSION IF NOT EXISTS set_user WITH SCHEMA public;

### Dump and restore cthub data

-pg_dump -f cthub-data.sql -n public -d cthub 
+pg_dump -f cthub-data.sql -n public -d cthub
psql cthub < ./cthub-data.sql >> ./cthub-restore.log 2>&1
- \ No newline at end of file
+
diff --git a/openshift/templates/keycloak/README.md b/openshift/templates/keycloak/README.md
index 10a48e20..bea2d25d 100644
--- a/openshift/templates/keycloak/README.md
+++ b/openshift/templates/keycloak/README.md
@@ -1,5 +1,7 @@
 ### Files included
-* keycloak-secret.yaml includes keycloak secrets
+
+- keycloak-secret.yaml includes keycloak secrets

### Create Secret keycloak-secret.yaml in tools, dev, test and prod env. The value for tools and dev should be the same
-* The values in the keycloak-secret.yaml are from https://bcgov.github.io/sso-requests/
+
+- The values in the keycloak-secret.yaml are from https://bcgov.github.io/sso-requests/
diff --git a/openshift/templates/metabase-postgresql/README.md b/openshift/templates/metabase-postgresql/README.md
index e4232fd9..44a2f918 100644
--- a/openshift/templates/metabase-postgresql/README.md
+++ b/openshift/templates/metabase-postgresql/README.md
@@ -1,29 +1,35 @@
 ## Files included
-* Dockerfile build metabase
-* metabase-bc.yaml build metabase image on Openshift
-* metabase-dc.yaml deploy metabase image on Openshift
+
+- Dockerfile build metabase
+- metabase-bc.yaml build metabase image on Openshift
+- metabase-dc.yaml deploy metabase image on Openshift

## Metabase to TFRS and ZEVA database access
+
The network policy allow-patroni-accepts-cthub-metabase-test in both TFRS and ZEVA opens access from the Metabase in CTHUB.

## Create read-only user metabaseuser in TFRS, ZEVA and ITVR for Metabase connection from CTHUB
+
```
// log in to the zeva database as the postgres user: psql zeva
CREATE USER metabaseuser WITH PASSWORD 'xxxxxx';
GRANT CONNECT ON DATABASE [tfrs/zeva/itvr] TO metabaseuser;
GRANT USAGE ON SCHEMA public TO metabaseuser;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO metabaseuser;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO metabaseuser;
-// verify permissions are granted.
+// verify permissions are granted. 
// select * from information_schema.role_table_grants where grantee='metabaseuser';
```
+
Note: replace zeva with tfrs when run on the TFRS project
Log in to the metabase pod and test the connection to the tfrs and zeva databases
Remember to store the metabaseuser password in a secret
When creating the database connection in the Metabase console, use the patroni master service, otherwise the tables will not be shown
+
```
curl [patroni master service name].e52f12-[env].svc.cluster.local:5432
```

## Notes
-* Use metabase-dc-spilo.yaml to deploy metabase with spilo
-* Use metabase-dc.yaml to deploy metabase with patroni v12
+
+- Use metabase-dc-spilo.yaml to deploy metabase with spilo
+- Use metabase-dc.yaml to deploy metabase with patroni v12
diff --git a/openshift/templates/metabase/README.md b/openshift/templates/metabase/README.md
index 29ecec6e..5b25b6a3 100644
--- a/openshift/templates/metabase/README.md
+++ b/openshift/templates/metabase/README.md
@@ -1,6 +1,7 @@
 ## Files included
-* Dockerfile build metabase
-* metabase-bc.yaml build metabase image on Openshift
-* metabase-dc.yaml deploy metabase image on Openshift
-Notes: the image uses file system as metabase database storage \ No newline at end of file
+- Dockerfile build metabase
+- metabase-bc.yaml build metabase image on Openshift
+- metabase-dc.yaml deploy metabase image on Openshift
+
+Notes: the image uses the file system as metabase database storage
diff --git a/openshift/templates/minio/README.md b/openshift/templates/minio/README.md
index 1dfbda0e..35f3fba9 100644
--- a/openshift/templates/minio/README.md
+++ b/openshift/templates/minio/README.md
@@ -1,8 +1,8 @@
 ### Files included

-* minio-bc.yaml minio build config
-* minio-dc.yaml minio deployment config
-* secret-template.yaml the secret template
+- minio-bc.yaml minio build config
+- minio-dc.yaml minio deployment config
+- secret-template.yaml the secret template

 ### build minio

@@ -24,4 +24,4 @@ NAME=cthub ENV_NAME=test SUFFIX=-test \

 oc process -f ./minio-dc.yaml \
 NAME=cthub ENV_NAME=prod SUFFIX=-prod \
-| oc create -f - -n 30b186-prod \ No newline at end of file
+| oc create -f - -n 30b186-prod
diff --git a/openshift/templates/patroni-2.1.1/README.md b/openshift/templates/patroni-2.1.1/README.md
index af3c72b7..b1b3146a 100644
--- a/openshift/templates/patroni-2.1.1/README.md
+++ b/openshift/templates/patroni-2.1.1/README.md
@@ -1,12 +1,14 @@
 ## Build and deploy Patroni 2.1.1 on top of Postgresql 12.4

 ### Files included
-* The files included are from https://github.com/bcgov/patroni-postgres-container.git
-* The post_init.sh is changed to create extensions and metabase db and user
-* The deploy.yaml is changed to provide metabase credentials
+
+- The files included are from https://github.com/bcgov/patroni-postgres-container.git
+- The post_init.sh is changed to create extensions and the metabase db and user
+- The deploy.yaml is changed to provide metabase credentials

 ### How to create patroni cluster
-* Use build.yaml to build patroni image and store it under tools project
-* Use secret-template.yaml to create template.paroni-patroni
-* Use deploy.yaml to deploy to test and prod environment
-* For dev environment, patroni is part of pipeline
+
+- Use build.yaml to build the patroni image and store it under the tools project
+- Use secret-template.yaml to create template.patroni-patroni
+- Use deploy.yaml to deploy to the test and prod environments
+- For the dev environment, patroni is part of the pipeline
diff --git a/openshift/templates/redis/readme.md b/openshift/templates/redis/readme.md
index 538af9d9..5abcea9d 
100644
--- a/openshift/templates/redis/readme.md
+++ b/openshift/templates/redis/readme.md
@@ -2,16 +2,16 @@

## Source

-* https://artifacthub.io/packages/helm/bitnami/redis
+- https://artifacthub.io/packages/helm/bitnami/redis

-* https://github.com/bitnami/charts/tree/main/bitnami/redis
+- https://github.com/bitnami/charts/tree/main/bitnami/redis

### Install and Version

-helm repo add bitnami https://charts.bitnami.com/bitnami 
+helm repo add bitnami https://charts.bitnami.com/bitnami

helm -n 30b186-dev upgrade --install -f ./cthub-dev-values.yaml cthub-redis-dev bitnami/redis --version 18.2.0

-helm -n 30b186-test upgrade --install -f ./cthub-test-values.yaml cthub-redis-test bitnami/redis --version 18.2.0 
+helm -n 30b186-test upgrade --install -f ./cthub-test-values.yaml cthub-redis-test bitnami/redis --version 18.2.0

helm -n 30b186-dev uninstall cthub-redis-dev
diff --git a/openshift/templates/superset/readme.md b/openshift/templates/superset/readme.md
index 9450d684..741f2753 100644
--- a/openshift/templates/superset/readme.md
+++ b/openshift/templates/superset/readme.md
@@ -1,28 +1,20 @@
-
-
https://artifacthub.io/packages/helm/superset/superset

helm repo add superset http://apache.github.io/superset/

-helm upgrade --install --set 
+helm upgrade --install --set
cthub-superset-dev superset/superset --version 0.10.14

-
supersetNode.connections.redis_host=cthub-redis-dev-headless
supersetNode.connections.redis_password=xxx
supersetNode.connections.db_host=cthub-crunchy-dev-pgbouncer
supersetNode.connections.db_user=xxx
supersetNode.connections.db_pass=xxx

-
supersetNode.connections.redis_password=xxxxx\\
-
create supersetuser in the database
update the patroni secret to add superset_username and superset_password
create the superset user and superset database in crunchy
-
-
-
diff --git a/superset/README.md b/superset/README.md
index c867121d..329a38f4 100644
--- a/superset/README.md
+++ b/superset/README.md
@@ -47,8 +47,8 @@ Steps:

1. Create `./docker/requirements-local.txt`
2. Add your new packages
3. Rebuild docker-compose
-   1. `docker-compose down -v`
-   2. `docker-compose up`
+   1. `docker-compose down -v`
+   2. `docker-compose up`

## Initializing Database
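
The initialization steps are cut off here; a minimal sketch, assuming the standard Superset CLI inside a docker-compose `superset` service (the service name is an assumption, and command names can vary between Superset versions):

```bash
# Hypothetical bootstrap sequence; adjust the service name to match your compose file.
docker-compose exec superset superset db upgrade        # apply metadata database migrations
docker-compose exec superset superset fab create-admin  # create an admin account (interactive)
docker-compose exec superset superset init              # seed default roles and permissions
```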